2024-12-09 11:20:22,201 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a 2024-12-09 11:20:22,215 main DEBUG Took 0.012383 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-09 11:20:22,216 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-09 11:20:22,216 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-09 11:20:22,217 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-09 11:20:22,219 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:20:22,227 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-09 11:20:22,242 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:20:22,244 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:20:22,245 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:20:22,245 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:20:22,246 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:20:22,246 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:20:22,247 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:20:22,248 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:20:22,248 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:20:22,249 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:20:22,249 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:20:22,250 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:20:22,250 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:20:22,251 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-09 11:20:22,251 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:20:22,252 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:20:22,252 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:20:22,253 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:20:22,253 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:20:22,254 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:20:22,254 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:20:22,254 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:20:22,255 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:20:22,255 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:20:22,256 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:20:22,256 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-09 11:20:22,258 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:20:22,260 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-09 11:20:22,262 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-09 11:20:22,263 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-09 11:20:22,264 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-09 11:20:22,264 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-09 11:20:22,274 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-09 11:20:22,277 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-09 11:20:22,279 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-09 11:20:22,279 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-09 11:20:22,279 main DEBUG createAppenders(={Console}) 2024-12-09 11:20:22,280 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a initialized 2024-12-09 11:20:22,281 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a 2024-12-09 11:20:22,281 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a OK. 2024-12-09 11:20:22,282 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-09 11:20:22,282 main DEBUG OutputStream closed 2024-12-09 11:20:22,283 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-09 11:20:22,283 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-09 11:20:22,283 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@4310d43 OK 2024-12-09 11:20:22,367 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-09 11:20:22,370 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-09 11:20:22,371 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-09 11:20:22,372 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-09 11:20:22,373 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-09 11:20:22,374 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-09 11:20:22,374 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-09 11:20:22,375 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-09 11:20:22,375 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-09 11:20:22,375 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-09 11:20:22,376 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-09 11:20:22,376 main DEBUG Registering MBean 
org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-09 11:20:22,377 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-09 11:20:22,377 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-09 11:20:22,378 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-09 11:20:22,378 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-09 11:20:22,379 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-09 11:20:22,380 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-09 11:20:22,383 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-09 11:20:22,383 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@12f9af83) with optional ClassLoader: null 2024-12-09 11:20:22,383 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-09 11:20:22,384 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@12f9af83] started OK. 2024-12-09T11:20:22,694 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784 2024-12-09 11:20:22,698 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-09 11:20:22,698 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
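[Context, not part of the captured log] The "Test class ... timeout: 13 mins" entries just below are emitted by HBase's JUnit class rule. A minimal sketch of that wiring, assuming the standard HBaseClassTestRule and testclassification APIs from HBase test code (the class name here is illustrative, not the actual test source):

  // Sketch of the JUnit wiring behind the "Test class ... timeout" entries;
  // HBaseClassTestRule and the testclassification categories are real HBase
  // classes, the test class name is illustrative.
  import org.apache.hadoop.hbase.HBaseClassTestRule;
  import org.apache.hadoop.hbase.testclassification.LargeTests;
  import org.apache.hadoop.hbase.testclassification.RegionServerTests;
  import org.junit.ClassRule;
  import org.junit.experimental.categories.Category;

  @Category({ RegionServerTests.class, LargeTests.class })
  public class TestAsyncWALReplaySketch {
    // The rule derives the per-class timeout (13 mins here, per the log below)
    // from the test's size category and enforces test naming conventions.
    @ClassRule
    public static final HBaseClassTestRule CLASS_RULE =
        HBaseClassTestRule.forClass(TestAsyncWALReplaySketch.class);

    // ... test methods ...
  }
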
2024-12-09T11:20:22,711 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay timeout: 13 mins 2024-12-09T11:20:22,720 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplayValueCompression timeout: 13 mins 2024-12-09T11:20:22,748 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-09T11:20:22,797 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-09T11:20:22,798 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-09T11:20:22,815 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T11:20:22,831 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74, deleteOnExit=true 2024-12-09T11:20:22,832 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T11:20:22,832 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/test.cache.data in system properties and HBase conf 2024-12-09T11:20:22,833 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T11:20:22,833 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/hadoop.log.dir in system properties and HBase conf 2024-12-09T11:20:22,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T11:20:22,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T11:20:22,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T11:20:22,941 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-09T11:20:23,074 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T11:20:23,079 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T11:20:23,080 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T11:20:23,081 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T11:20:23,081 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T11:20:23,082 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T11:20:23,082 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T11:20:23,083 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T11:20:23,083 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T11:20:23,084 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T11:20:23,085 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/nfs.dump.dir in system properties and HBase conf 2024-12-09T11:20:23,085 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/java.io.tmpdir in system properties and HBase conf 2024-12-09T11:20:23,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T11:20:23,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T11:20:23,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T11:20:24,183 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-09T11:20:24,279 INFO [Time-limited test {}] log.Log(170): Logging initialized @2956ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-09T11:20:24,373 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:20:24,443 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:20:24,464 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:20:24,464 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:20:24,465 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T11:20:24,488 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:20:24,490 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f37ffca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:20:24,491 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6dc9d5c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:20:24,729 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3717288f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/java.io.tmpdir/jetty-localhost-39203-hadoop-hdfs-3_4_1-tests_jar-_-any-17902328816836189205/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T11:20:24,736 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4bd70930{HTTP/1.1, (http/1.1)}{localhost:39203} 2024-12-09T11:20:24,736 INFO [Time-limited test {}] server.Server(415): Started @3414ms 2024-12-09T11:20:25,209 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:20:25,215 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:20:25,216 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:20:25,216 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:20:25,216 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T11:20:25,217 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@cf5a85e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:20:25,218 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a359997{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:20:25,383 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@330740de{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/java.io.tmpdir/jetty-localhost-33197-hadoop-hdfs-3_4_1-tests_jar-_-any-14458682315430307685/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:20:25,384 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7b24cab9{HTTP/1.1, (http/1.1)}{localhost:33197} 2024-12-09T11:20:25,384 INFO [Time-limited test {}] server.Server(415): Started @4062ms 2024-12-09T11:20:25,440 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T11:20:25,714 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:20:25,722 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:20:25,726 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:20:25,727 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:20:25,727 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T11:20:25,728 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46b092e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:20:25,729 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cc2d6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:20:25,882 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7bd427b8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/java.io.tmpdir/jetty-localhost-41721-hadoop-hdfs-3_4_1-tests_jar-_-any-12572877372500213352/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:20:25,882 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6915083f{HTTP/1.1, (http/1.1)}{localhost:41721} 2024-12-09T11:20:25,883 INFO [Time-limited test {}] server.Server(415): Started @4560ms 2024-12-09T11:20:25,886 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T11:20:26,003 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:20:26,017 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:20:26,051 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:20:26,051 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:20:26,052 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T11:20:26,059 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a6d5e13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:20:26,060 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f9972d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:20:26,098 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/data/data3/current/BP-32692473-172.17.0.3-1733743223895/current, will proceed with Du for space computation calculation, 2024-12-09T11:20:26,099 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/data/data1/current/BP-32692473-172.17.0.3-1733743223895/current, will proceed with Du for space computation calculation, 2024-12-09T11:20:26,099 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/data/data4/current/BP-32692473-172.17.0.3-1733743223895/current, will proceed with Du for space computation calculation, 2024-12-09T11:20:26,098 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/data/data2/current/BP-32692473-172.17.0.3-1733743223895/current, will proceed with Du for space computation calculation, 2024-12-09T11:20:26,159 WARN [Thread-83 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T11:20:26,160 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T11:20:26,218 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35f1150e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/java.io.tmpdir/jetty-localhost-40521-hadoop-hdfs-3_4_1-tests_jar-_-any-8211113629121833850/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:20:26,219 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13a77e13{HTTP/1.1, (http/1.1)}{localhost:40521} 2024-12-09T11:20:26,219 INFO [Time-limited test {}] server.Server(415): Started @4896ms 2024-12-09T11:20:26,222 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T11:20:26,225 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x801a331f1393ee5 with lease ID 0x2507af1c9947e629: Processing first storage report for DS-8f8100b1-22b1-43bf-9468-405dfca7481e from datanode DatanodeRegistration(127.0.0.1:46359, datanodeUuid=4ec6cf5b-f9e4-4fa5-9dd8-0357ae5c8876, infoPort=39985, infoSecurePort=0, ipcPort=34527, storageInfo=lv=-57;cid=testClusterID;nsid=1209891129;c=1733743223895) 2024-12-09T11:20:26,226 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x801a331f1393ee5 with lease ID 0x2507af1c9947e629: from storage DS-8f8100b1-22b1-43bf-9468-405dfca7481e node DatanodeRegistration(127.0.0.1:46359, datanodeUuid=4ec6cf5b-f9e4-4fa5-9dd8-0357ae5c8876, infoPort=39985, infoSecurePort=0, ipcPort=34527, storageInfo=lv=-57;cid=testClusterID;nsid=1209891129;c=1733743223895), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T11:20:26,227 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x935c1acb1011da7d with lease ID 0x2507af1c9947e628: Processing first storage report for DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e from datanode DatanodeRegistration(127.0.0.1:44093, datanodeUuid=db252fff-4d65-475d-a955-c1284fc1dd58, infoPort=43581, infoSecurePort=0, ipcPort=45319, storageInfo=lv=-57;cid=testClusterID;nsid=1209891129;c=1733743223895) 2024-12-09T11:20:26,227 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x935c1acb1011da7d with lease ID 0x2507af1c9947e628: from storage DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e node DatanodeRegistration(127.0.0.1:44093, datanodeUuid=db252fff-4d65-475d-a955-c1284fc1dd58, infoPort=43581, infoSecurePort=0, ipcPort=45319, storageInfo=lv=-57;cid=testClusterID;nsid=1209891129;c=1733743223895), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:20:26,227 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x801a331f1393ee5 with lease ID 0x2507af1c9947e629: Processing first storage report for DS-d88fa068-e8b1-4268-9ebe-5ca4f0fc80a9 from datanode DatanodeRegistration(127.0.0.1:46359, datanodeUuid=4ec6cf5b-f9e4-4fa5-9dd8-0357ae5c8876, infoPort=39985, infoSecurePort=0, ipcPort=34527, storageInfo=lv=-57;cid=testClusterID;nsid=1209891129;c=1733743223895) 2024-12-09T11:20:26,227 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x801a331f1393ee5 with lease ID 
0x2507af1c9947e629: from storage DS-d88fa068-e8b1-4268-9ebe-5ca4f0fc80a9 node DatanodeRegistration(127.0.0.1:46359, datanodeUuid=4ec6cf5b-f9e4-4fa5-9dd8-0357ae5c8876, infoPort=39985, infoSecurePort=0, ipcPort=34527, storageInfo=lv=-57;cid=testClusterID;nsid=1209891129;c=1733743223895), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:20:26,227 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x935c1acb1011da7d with lease ID 0x2507af1c9947e628: Processing first storage report for DS-54c6eb09-b670-416b-abad-8c342b924744 from datanode DatanodeRegistration(127.0.0.1:44093, datanodeUuid=db252fff-4d65-475d-a955-c1284fc1dd58, infoPort=43581, infoSecurePort=0, ipcPort=45319, storageInfo=lv=-57;cid=testClusterID;nsid=1209891129;c=1733743223895) 2024-12-09T11:20:26,228 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x935c1acb1011da7d with lease ID 0x2507af1c9947e628: from storage DS-54c6eb09-b670-416b-abad-8c342b924744 node DatanodeRegistration(127.0.0.1:44093, datanodeUuid=db252fff-4d65-475d-a955-c1284fc1dd58, infoPort=43581, infoSecurePort=0, ipcPort=45319, storageInfo=lv=-57;cid=testClusterID;nsid=1209891129;c=1733743223895), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T11:20:26,371 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/data/data6/current/BP-32692473-172.17.0.3-1733743223895/current, will proceed with Du for space computation calculation, 2024-12-09T11:20:26,372 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/data/data5/current/BP-32692473-172.17.0.3-1733743223895/current, will proceed with Du for space computation calculation, 2024-12-09T11:20:26,407 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T11:20:26,416 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xea89dfcd7c120286 with lease ID 0x2507af1c9947e62a: Processing first storage report for DS-f790a494-97c1-4864-a7b1-6442795d840b from datanode DatanodeRegistration(127.0.0.1:34459, datanodeUuid=44c08323-bdb9-45d4-9f31-9a5f0d6767d1, infoPort=39935, infoSecurePort=0, ipcPort=45857, storageInfo=lv=-57;cid=testClusterID;nsid=1209891129;c=1733743223895) 2024-12-09T11:20:26,416 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xea89dfcd7c120286 with lease ID 0x2507af1c9947e62a: from storage DS-f790a494-97c1-4864-a7b1-6442795d840b node DatanodeRegistration(127.0.0.1:34459, datanodeUuid=44c08323-bdb9-45d4-9f31-9a5f0d6767d1, infoPort=39935, infoSecurePort=0, ipcPort=45857, storageInfo=lv=-57;cid=testClusterID;nsid=1209891129;c=1733743223895), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:20:26,417 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xea89dfcd7c120286 with lease ID 0x2507af1c9947e62a: Processing first storage report for DS-fd6652f5-213a-41ec-8d6a-6b00cc2a30ba from datanode DatanodeRegistration(127.0.0.1:34459, datanodeUuid=44c08323-bdb9-45d4-9f31-9a5f0d6767d1, infoPort=39935, infoSecurePort=0, ipcPort=45857, storageInfo=lv=-57;cid=testClusterID;nsid=1209891129;c=1733743223895) 2024-12-09T11:20:26,417 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xea89dfcd7c120286 with lease ID 0x2507af1c9947e62a: from storage DS-fd6652f5-213a-41ec-8d6a-6b00cc2a30ba node DatanodeRegistration(127.0.0.1:34459, datanodeUuid=44c08323-bdb9-45d4-9f31-9a5f0d6767d1, infoPort=39935, infoSecurePort=0, ipcPort=45857, storageInfo=lv=-57;cid=testClusterID;nsid=1209891129;c=1733743223895), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:20:26,638 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784 2024-12-09T11:20:26,722 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/zookeeper_0, clientPort=56083, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T11:20:26,734 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56083 2024-12-09T11:20:26,744 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:20:26,747 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:20:27,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741825_1001 (size=7) 2024-12-09T11:20:27,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741825_1001 (size=7) 2024-12-09T11:20:27,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741825_1001 (size=7) 2024-12-09T11:20:27,433 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de with version=8 2024-12-09T11:20:27,433 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/hbase-staging 2024-12-09T11:20:27,831 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2dff3a36d44f:0 server-side Connection retries=45 2024-12-09T11:20:27,845 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:20:27,846 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T11:20:27,854 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T11:20:27,855 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:20:27,855 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T11:20:28,075 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T11:20:28,150 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-09T11:20:28,162 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-09T11:20:28,167 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T11:20:28,201 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 18739 (auto-detected) 2024-12-09T11:20:28,202 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-12-09T11:20:28,229 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42781 2024-12-09T11:20:28,258 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process 
identifier=master:42781 connecting to ZooKeeper ensemble=127.0.0.1:56083 2024-12-09T11:20:28,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:427810x0, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T11:20:28,340 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42781-0x1012ae9bf670000 connected 2024-12-09T11:20:28,419 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:20:28,423 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:20:28,470 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:20:28,476 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de, hbase.cluster.distributed=false 2024-12-09T11:20:28,509 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T11:20:28,517 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42781 2024-12-09T11:20:28,522 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42781 2024-12-09T11:20:28,523 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42781 2024-12-09T11:20:28,562 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42781 2024-12-09T11:20:28,566 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42781 2024-12-09T11:20:28,708 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2dff3a36d44f:0 server-side Connection retries=45 2024-12-09T11:20:28,711 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:20:28,711 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T11:20:28,712 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T11:20:28,712 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:20:28,712 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 
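[Context, not part of the captured log] The startup logged above (StartMiniClusterOption with 1 master, 3 region servers, 3 datanodes and 1 ZooKeeper server, the MiniZooKeeperCluster on client port 56083, and the master/region-server RPC servers) corresponds roughly to the following HBaseTestingUtil usage. This is a hedged sketch against the public startMiniCluster/shutdownMiniCluster API, not the exact code of the test; the class name and the placeholder comment are illustrative.

  // Sketch: bring up a mini cluster matching the options logged above
  // (1 master, 3 region servers, 3 datanodes, 1 ZooKeeper server). Assumes the
  // public HBaseTestingUtil / StartMiniClusterOption test APIs; error handling
  // and the surrounding test class are omitted.
  import org.apache.hadoop.hbase.HBaseTestingUtil;
  import org.apache.hadoop.hbase.StartMiniClusterOption;

  public class MiniClusterSketch {
    public static void main(String[] args) throws Exception {
      HBaseTestingUtil util = new HBaseTestingUtil();
      StartMiniClusterOption option = StartMiniClusterOption.builder()
          .numMasters(1)
          .numRegionServers(3)
          .numDataNodes(3)
          .numZkServers(1)
          .build();
      util.startMiniCluster(option);   // starts DFS, ZooKeeper, master and region servers
      try {
        // ... run WAL-replay test logic against the mini cluster ...
      } finally {
        util.shutdownMiniCluster();    // tears the whole mini cluster down
      }
    }
  }
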
2024-12-09T11:20:28,716 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T11:20:28,719 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T11:20:28,720 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:46259 2024-12-09T11:20:28,722 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46259 connecting to ZooKeeper ensemble=127.0.0.1:56083 2024-12-09T11:20:28,724 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:20:28,730 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:20:28,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:462590x0, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T11:20:28,739 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:462590x0, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:20:28,743 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46259-0x1012ae9bf670001 connected 2024-12-09T11:20:28,744 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T11:20:28,754 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T11:20:28,757 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T11:20:28,765 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T11:20:28,766 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46259 2024-12-09T11:20:28,767 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46259 2024-12-09T11:20:28,770 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46259 2024-12-09T11:20:28,774 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46259 2024-12-09T11:20:28,774 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46259 2024-12-09T11:20:28,796 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2dff3a36d44f:0 server-side Connection retries=45 2024-12-09T11:20:28,796 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:20:28,796 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T11:20:28,797 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T11:20:28,797 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:20:28,797 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T11:20:28,797 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T11:20:28,798 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T11:20:28,799 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:39663 2024-12-09T11:20:28,801 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39663 connecting to ZooKeeper ensemble=127.0.0.1:56083 2024-12-09T11:20:28,802 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:20:28,807 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:20:28,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:396630x0, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T11:20:28,816 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:396630x0, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:20:28,817 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T11:20:28,819 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39663-0x1012ae9bf670002 connected 2024-12-09T11:20:28,827 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T11:20:28,829 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39663-0x1012ae9bf670002, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T11:20:28,832 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39663-0x1012ae9bf670002, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T11:20:28,842 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39663 2024-12-09T11:20:28,843 
DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39663 2024-12-09T11:20:28,843 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39663 2024-12-09T11:20:28,844 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39663 2024-12-09T11:20:28,845 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39663 2024-12-09T11:20:28,872 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2dff3a36d44f:0 server-side Connection retries=45 2024-12-09T11:20:28,872 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:20:28,872 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T11:20:28,872 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T11:20:28,872 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:20:28,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T11:20:28,873 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T11:20:28,873 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T11:20:28,874 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:46367 2024-12-09T11:20:28,875 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46367 connecting to ZooKeeper ensemble=127.0.0.1:56083 2024-12-09T11:20:28,877 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:20:28,879 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:20:28,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:463670x0, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T11:20:28,888 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:463670x0, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:20:28,888 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, 
blockSize=64 KB 2024-12-09T11:20:28,889 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46367-0x1012ae9bf670003 connected 2024-12-09T11:20:28,891 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T11:20:28,892 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T11:20:28,894 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T11:20:28,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46367 2024-12-09T11:20:28,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46367 2024-12-09T11:20:28,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46367 2024-12-09T11:20:28,898 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46367 2024-12-09T11:20:28,901 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46367 2024-12-09T11:20:28,918 DEBUG [M:0;2dff3a36d44f:42781 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2dff3a36d44f:42781 2024-12-09T11:20:28,919 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2dff3a36d44f,42781,1733743227566 2024-12-09T11:20:28,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:20:28,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:20:28,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:20:28,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39663-0x1012ae9bf670002, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:20:28,929 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2dff3a36d44f,42781,1733743227566 2024-12-09T11:20:28,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T11:20:28,955 DEBUG [Time-limited test-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:39663-0x1012ae9bf670002, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T11:20:28,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:28,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:28,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39663-0x1012ae9bf670002, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:28,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T11:20:28,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:28,958 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T11:20:28,960 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2dff3a36d44f,42781,1733743227566 from backup master directory 2024-12-09T11:20:28,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2dff3a36d44f,42781,1733743227566 2024-12-09T11:20:28,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:20:28,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:20:28,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:20:28,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39663-0x1012ae9bf670002, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:20:28,965 WARN [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T11:20:28,965 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2dff3a36d44f,42781,1733743227566 2024-12-09T11:20:28,969 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-09T11:20:28,970 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-09T11:20:29,034 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/hbase.id] with ID: ed218c8a-0bff-4a39-941d-68ae72b73aae 2024-12-09T11:20:29,035 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/.tmp/hbase.id 2024-12-09T11:20:29,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741826_1002 (size=42) 2024-12-09T11:20:29,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741826_1002 (size=42) 2024-12-09T11:20:29,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741826_1002 (size=42) 2024-12-09T11:20:29,050 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/.tmp/hbase.id]:[hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/hbase.id] 2024-12-09T11:20:29,108 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:20:29,114 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T11:20:29,142 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 25ms. 
2024-12-09T11:20:29,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:29,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:29,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39663-0x1012ae9bf670002, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:29,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:29,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741827_1003 (size=196) 2024-12-09T11:20:29,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741827_1003 (size=196) 2024-12-09T11:20:29,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741827_1003 (size=196) 2024-12-09T11:20:29,195 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:20:29,198 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T11:20:29,205 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:29,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 
is added to blk_1073741828_1004 (size=1189) 2024-12-09T11:20:29,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741828_1004 (size=1189) 2024-12-09T11:20:29,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741828_1004 (size=1189) 2024-12-09T11:20:29,271 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/data/master/store 2024-12-09T11:20:29,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741829_1005 (size=34) 2024-12-09T11:20:29,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741829_1005 (size=34) 2024-12-09T11:20:29,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741829_1005 (size=34) 2024-12-09T11:20:29,308 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-12-09T11:20:29,312 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:29,313 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T11:20:29,313 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:20:29,313 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:20:29,315 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T11:20:29,315 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:20:29,315 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:20:29,316 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733743229313Disabling compacts and flushes for region at 1733743229313Disabling writes for close at 1733743229315 (+2 ms)Writing region close event to WAL at 1733743229315Closed at 1733743229315 2024-12-09T11:20:29,318 WARN [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/data/master/store/.initializing 2024-12-09T11:20:29,318 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/WALs/2dff3a36d44f,42781,1733743227566 2024-12-09T11:20:29,328 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T11:20:29,348 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C42781%2C1733743227566, suffix=, logDir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/WALs/2dff3a36d44f,42781,1733743227566, archiveDir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/oldWALs, maxLogs=10 2024-12-09T11:20:29,397 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/WALs/2dff3a36d44f,42781,1733743227566/2dff3a36d44f%2C42781%2C1733743227566.1733743229354, exclude list is [], retry=0 2024-12-09T11:20:29,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.&lt;clinit&gt;(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:20:29,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:29,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:29,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:29,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-09T11:20:29,465 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/WALs/2dff3a36d44f,42781,1733743227566/2dff3a36d44f%2C42781%2C1733743227566.1733743229354 2024-12-09T11:20:29,466 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39935:39935),(127.0.0.1/127.0.0.1:39985:39985)] 2024-12-09T11:20:29,467 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:20:29,467 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:29,470 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:20:29,471 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:20:29,507 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:20:29,532 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T11:20:29,536 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:29,538 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:20:29,539 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:20:29,542 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T11:20:29,542 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:29,543 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:29,543 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:20:29,545 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; 
major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T11:20:29,545 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:29,546 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:29,547 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:20:29,549 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T11:20:29,549 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:29,550 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:29,550 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:20:29,553 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:20:29,554 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:20:29,559 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay 
for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:20:29,560 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:20:29,563 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T11:20:29,566 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:20:29,570 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:20:29,571 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61189034, jitterRate=-0.08821234107017517}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T11:20:29,577 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733743229483Initializing all the Stores at 1733743229485 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743229485Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743229486 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743229486Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743229487 (+1 ms)Cleaning up temporary data from old regions at 1733743229560 (+73 ms)Region opened successfully at 1733743229577 (+17 ms) 2024-12-09T11:20:29,578 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T11:20:29,611 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@585ae432, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2dff3a36d44f/172.17.0.3:0 2024-12-09T11:20:29,643 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T11:20:29,654 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T11:20:29,655 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T11:20:29,657 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T11:20:29,659 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-09T11:20:29,664 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-09T11:20:29,664 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T11:20:29,688 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T11:20:29,696 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T11:20:29,700 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T11:20:29,702 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T11:20:29,704 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T11:20:29,705 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T11:20:29,707 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T11:20:29,710 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T11:20:29,712 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T11:20:29,713 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Unable to get 
data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T11:20:29,715 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T11:20:29,731 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T11:20:29,734 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T11:20:29,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T11:20:29,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39663-0x1012ae9bf670002, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T11:20:29,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T11:20:29,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T11:20:29,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:29,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39663-0x1012ae9bf670002, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:29,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:29,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:29,741 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2dff3a36d44f,42781,1733743227566, sessionid=0x1012ae9bf670000, setting cluster-up flag (Was=false) 2024-12-09T11:20:29,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:29,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:29,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:29,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39663-0x1012ae9bf670002, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:29,759 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T11:20:29,760 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2dff3a36d44f,42781,1733743227566 2024-12-09T11:20:29,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:29,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:29,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:29,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39663-0x1012ae9bf670002, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:29,772 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T11:20:29,774 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2dff3a36d44f,42781,1733743227566 2024-12-09T11:20:29,780 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T11:20:29,806 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.HRegionServer(746): ClusterId : ed218c8a-0bff-4a39-941d-68ae72b73aae 2024-12-09T11:20:29,807 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.HRegionServer(746): ClusterId : ed218c8a-0bff-4a39-941d-68ae72b73aae 2024-12-09T11:20:29,807 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.HRegionServer(746): ClusterId : ed218c8a-0bff-4a39-941d-68ae72b73aae 2024-12-09T11:20:29,809 DEBUG [RS:0;2dff3a36d44f:46259 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T11:20:29,809 DEBUG [RS:2;2dff3a36d44f:46367 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T11:20:29,809 DEBUG [RS:1;2dff3a36d44f:39663 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T11:20:29,819 DEBUG [RS:2;2dff3a36d44f:46367 {}] procedure.RegionServerProcedureManagerHost(45): 
Procedure flush-table-proc initialized 2024-12-09T11:20:29,819 DEBUG [RS:0;2dff3a36d44f:46259 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T11:20:29,819 DEBUG [RS:1;2dff3a36d44f:39663 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T11:20:29,819 DEBUG [RS:2;2dff3a36d44f:46367 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T11:20:29,819 DEBUG [RS:1;2dff3a36d44f:39663 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T11:20:29,819 DEBUG [RS:0;2dff3a36d44f:46259 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T11:20:29,823 DEBUG [RS:2;2dff3a36d44f:46367 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T11:20:29,823 DEBUG [RS:2;2dff3a36d44f:46367 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66dae4f9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2dff3a36d44f/172.17.0.3:0 2024-12-09T11:20:29,826 DEBUG [RS:0;2dff3a36d44f:46259 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T11:20:29,826 DEBUG [RS:0;2dff3a36d44f:46259 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a5330d5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2dff3a36d44f/172.17.0.3:0 2024-12-09T11:20:29,827 DEBUG [RS:1;2dff3a36d44f:39663 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T11:20:29,827 DEBUG [RS:1;2dff3a36d44f:39663 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b51f4c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2dff3a36d44f/172.17.0.3:0 2024-12-09T11:20:29,842 DEBUG [RS:2;2dff3a36d44f:46367 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;2dff3a36d44f:46367 2024-12-09T11:20:29,846 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T11:20:29,846 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T11:20:29,846 DEBUG [RS:2;2dff3a36d44f:46367 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T11:20:29,849 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.HRegionServer(2659): reportForDuty to master=2dff3a36d44f,42781,1733743227566 with port=46367, startcode=1733743228871 2024-12-09T11:20:29,851 DEBUG [RS:1;2dff3a36d44f:39663 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;2dff3a36d44f:39663 2024-12-09T11:20:29,851 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T11:20:29,851 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T11:20:29,851 DEBUG [RS:1;2dff3a36d44f:39663 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T11:20:29,853 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.HRegionServer(2659): reportForDuty to master=2dff3a36d44f,42781,1733743227566 with port=39663, startcode=1733743228795 2024-12-09T11:20:29,853 DEBUG [RS:0;2dff3a36d44f:46259 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2dff3a36d44f:46259 2024-12-09T11:20:29,853 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T11:20:29,853 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T11:20:29,853 DEBUG [RS:0;2dff3a36d44f:46259 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T11:20:29,855 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.HRegionServer(2659): reportForDuty to master=2dff3a36d44f,42781,1733743227566 with port=46259, startcode=1733743228656 2024-12-09T11:20:29,866 DEBUG [RS:2;2dff3a36d44f:46367 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T11:20:29,866 DEBUG [RS:1;2dff3a36d44f:39663 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T11:20:29,870 DEBUG [RS:0;2dff3a36d44f:46259 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T11:20:29,885 INFO [AsyncFSWAL-0-hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData-prefix:2dff3a36d44f,42781,1733743227566 {}] compress.Compression(560): Loaded codec org.apache.hadoop.hbase.io.compress.ReusableStreamGzipCodec for compression algorithm GZ 2024-12-09T11:20:29,918 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T11:20:29,925 INFO [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44523, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T11:20:29,925 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55501, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T11:20:29,927 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41401, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T11:20:29,933 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 
2024-12-09T11:20:29,935 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42781 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T11:20:29,942 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42781 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T11:20:29,942 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42781 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T11:20:29,943 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T11:20:29,949 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2dff3a36d44f,42781,1733743227566 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T11:20:29,961 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:20:29,961 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:20:29,961 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:20:29,962 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:20:29,962 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2dff3a36d44f:0, corePoolSize=10, maxPoolSize=10 2024-12-09T11:20:29,962 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:29,963 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T11:20:29,963 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:29,970 DEBUG [RS:1;2dff3a36d44f:39663 {}] regionserver.HRegionServer(2683): Master is not running yet 
2024-12-09T11:20:29,970 DEBUG [RS:0;2dff3a36d44f:46259 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-09T11:20:29,970 DEBUG [RS:2;2dff3a36d44f:46367 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-09T11:20:29,971 WARN [RS:1;2dff3a36d44f:39663 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-09T11:20:29,971 WARN [RS:2;2dff3a36d44f:46367 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-09T11:20:29,971 WARN [RS:0;2dff3a36d44f:46259 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-09T11:20:29,984 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733743259984 2024-12-09T11:20:29,986 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T11:20:29,987 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T11:20:29,988 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:20:29,988 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T11:20:29,991 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T11:20:29,991 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T11:20:29,992 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T11:20:29,992 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T11:20:29,993 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
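The reportForDuty warnings above show the region servers polling the master until it finishes becoming active: each attempt fails with ServerNotRunningYetException, the server sleeps 100 ms, and it tries again. Below is a minimal sketch of that retry-with-sleep pattern; the reportForDuty() stub, retry bound, and delay are illustrative assumptions, not HBase's actual implementation.

```java
import java.util.concurrent.TimeUnit;

public class ReportForDutyRetrySketch {
  // Hypothetical stand-in for the RPC that fails while the master is still starting up.
  static boolean reportForDuty() {
    return false; // pretend the master is not running yet
  }

  public static void main(String[] args) throws InterruptedException {
    long sleepMs = 100;   // matches the 100 ms seen in the log
    int maxAttempts = 50; // illustrative bound; the real server keeps retrying
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      if (reportForDuty()) {
        System.out.println("registered with master on attempt " + attempt);
        return;
      }
      System.out.println("reportForDuty failed; sleeping " + sleepMs + " ms and then retrying.");
      TimeUnit.MILLISECONDS.sleep(sleepMs);
    }
    System.out.println("gave up after " + maxAttempts + " attempts");
  }
}
```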
2024-12-09T11:20:29,996 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:29,996 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T11:20:30,000 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T11:20:30,001 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T11:20:30,002 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T11:20:30,003 WARN [IPC Server handler 2 on default port 40493 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:20:30,004 WARN [IPC Server handler 2 on default port 40493 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:20:30,004 WARN [IPC Server handler 2 on default port 40493 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], 
replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:20:30,005 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T11:20:30,005 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T11:20:30,007 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743230006,5,FailOnTimeoutGroup] 2024-12-09T11:20:30,008 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743230007,5,FailOnTimeoutGroup] 2024-12-09T11:20:30,008 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,008 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T11:20:30,009 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,009 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
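The FSTableDescriptors entry above records the hbase:meta descriptor with its info/ns/rep_barrier/table families (ROWCOL bloom filters, ROW_INDEX_V1 encoding, in-memory, 8 KB blocks for most families). A minimal sketch of building a comparable column family with the HBase 2.x+ client descriptor builders is shown below, assuming that API is available; the table name 'demo' is illustrative, since hbase:meta itself is created by the master, not by clients.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static void main(String[] args) {
    // Column family mirroring the settings logged for the 'info' family above.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024) // 8192 B (8KB) in the descriptor above
        .build();

    // 'demo' is a hypothetical table used only to show the builder shape.
    TableDescriptor table = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(info)
        .build();
    System.out.println(table);
  }
}
```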
2024-12-09T11:20:30,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741831_1007 (size=1321) 2024-12-09T11:20:30,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741831_1007 (size=1321) 2024-12-09T11:20:30,020 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T11:20:30,021 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de 2024-12-09T11:20:30,026 WARN [IPC Server handler 0 on default port 40493 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:20:30,026 WARN [IPC Server handler 0 on default port 40493 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:20:30,026 WARN [IPC Server handler 0 on default port 40493 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, 
storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:20:30,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741832_1008 (size=32) 2024-12-09T11:20:30,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741832_1008 (size=32) 2024-12-09T11:20:30,040 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:30,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T11:20:30,045 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T11:20:30,045 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:30,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:20:30,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T11:20:30,049 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T11:20:30,049 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:30,050 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:20:30,050 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T11:20:30,052 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T11:20:30,052 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:30,053 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:20:30,053 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T11:20:30,056 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T11:20:30,056 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:30,059 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
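The CompactionConfiguration lines above dump the effective compaction tuning for each store of region 1588230740 (minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2, 128 MB minCompactSize, and so on). Below is a minimal sketch of overriding a few of those knobs through a Hadoop Configuration; the property names are the commonly documented hbase.hstore.compaction.* keys and should be verified against the exact HBase version in use.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Mirror (or change) the values reported in the CompactionConfiguration log lines.
    conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // file selection ratio
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}
```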
2024-12-09T11:20:30,059 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T11:20:30,061 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740 2024-12-09T11:20:30,062 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740 2024-12-09T11:20:30,065 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T11:20:30,065 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T11:20:30,066 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T11:20:30,068 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T11:20:30,072 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.HRegionServer(2659): reportForDuty to master=2dff3a36d44f,42781,1733743227566 with port=46259, startcode=1733743228656 2024-12-09T11:20:30,072 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.HRegionServer(2659): reportForDuty to master=2dff3a36d44f,42781,1733743227566 with port=39663, startcode=1733743228795 2024-12-09T11:20:30,072 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.HRegionServer(2659): reportForDuty to master=2dff3a36d44f,42781,1733743227566 with port=46367, startcode=1733743228871 2024-12-09T11:20:30,073 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:20:30,074 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42781 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2dff3a36d44f,39663,1733743228795 2024-12-09T11:20:30,076 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42781 {}] master.ServerManager(517): Registering regionserver=2dff3a36d44f,39663,1733743228795 2024-12-09T11:20:30,079 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65152174, jitterRate=-0.02915695309638977}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T11:20:30,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733743230040Initializing all the Stores at 1733743230042 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743230042Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743230042Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743230042Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743230042Cleaning up temporary data from old regions at 1733743230065 (+23 ms)Region opened successfully at 1733743230082 (+17 ms) 2024-12-09T11:20:30,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T11:20:30,082 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T11:20:30,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T11:20:30,083 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T11:20:30,083 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T11:20:30,084 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T11:20:30,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733743230082Disabling compacts and flushes for region at 1733743230082Disabling writes for close at 1733743230083 (+1 ms)Writing region close event to WAL at 1733743230084 (+1 ms)Closed at 1733743230084 2024-12-09T11:20:30,085 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42781 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2dff3a36d44f,46259,1733743228656 2024-12-09T11:20:30,085 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42781 {}] master.ServerManager(517): Registering regionserver=2dff3a36d44f,46259,1733743228656 2024-12-09T11:20:30,088 DEBUG [RS:1;2dff3a36d44f:39663 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de 2024-12-09T11:20:30,088 DEBUG [RS:1;2dff3a36d44f:39663 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40493 2024-12-09T11:20:30,088 DEBUG [RS:1;2dff3a36d44f:39663 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T11:20:30,088 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:20:30,089 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T11:20:30,089 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42781 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2dff3a36d44f,46367,1733743228871 2024-12-09T11:20:30,089 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42781 {}] master.ServerManager(517): 
Registering regionserver=2dff3a36d44f,46367,1733743228871 2024-12-09T11:20:30,089 DEBUG [RS:0;2dff3a36d44f:46259 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de 2024-12-09T11:20:30,089 DEBUG [RS:0;2dff3a36d44f:46259 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40493 2024-12-09T11:20:30,090 DEBUG [RS:0;2dff3a36d44f:46259 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T11:20:30,094 DEBUG [RS:2;2dff3a36d44f:46367 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de 2024-12-09T11:20:30,094 DEBUG [RS:2;2dff3a36d44f:46367 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40493 2024-12-09T11:20:30,094 DEBUG [RS:2;2dff3a36d44f:46367 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T11:20:30,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:20:30,096 DEBUG [RS:1;2dff3a36d44f:39663 {}] zookeeper.ZKUtil(111): regionserver:39663-0x1012ae9bf670002, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2dff3a36d44f,39663,1733743228795 2024-12-09T11:20:30,096 DEBUG [RS:0;2dff3a36d44f:46259 {}] zookeeper.ZKUtil(111): regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2dff3a36d44f,46259,1733743228656 2024-12-09T11:20:30,097 WARN [RS:1;2dff3a36d44f:39663 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T11:20:30,097 WARN [RS:0;2dff3a36d44f:46259 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
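The ZKUtil and ZKWatcher entries above show each region server registering an ephemeral znode under /hbase/rs, which the master's RegionServerTracker watches to discover live servers. A minimal sketch of inspecting that znode with the plain ZooKeeper client follows; the quorum address 127.0.0.1:56083 is taken from the log, and the snippet assumes the test cluster is still running when it executes.

```java
import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ListRegionServerZNodes {
  public static void main(String[] args) throws Exception {
    // Quorum and base znode as reported in the log; adjust for a real deployment.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:56083", 30_000, event -> { });
    try {
      List<String> servers = zk.getChildren("/hbase/rs", false);
      // Each child is an ephemeral node named host,port,startcode for a live region server.
      servers.forEach(System.out::println);
    } finally {
      zk.close();
    }
  }
}
```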
2024-12-09T11:20:30,097 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T11:20:30,097 INFO [RS:1;2dff3a36d44f:39663 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:30,097 INFO [RS:0;2dff3a36d44f:46259 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:30,097 DEBUG [RS:1;2dff3a36d44f:39663 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795 2024-12-09T11:20:30,097 DEBUG [RS:0;2dff3a36d44f:46259 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,46259,1733743228656 2024-12-09T11:20:30,099 DEBUG [RS:2;2dff3a36d44f:46367 {}] zookeeper.ZKUtil(111): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2dff3a36d44f,46367,1733743228871 2024-12-09T11:20:30,100 WARN [RS:2;2dff3a36d44f:46367 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T11:20:30,100 INFO [RS:2;2dff3a36d44f:46367 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:30,100 DEBUG [RS:2;2dff3a36d44f:46367 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,46367,1733743228871 2024-12-09T11:20:30,101 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2dff3a36d44f,46367,1733743228871] 2024-12-09T11:20:30,101 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2dff3a36d44f,39663,1733743228795] 2024-12-09T11:20:30,101 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2dff3a36d44f,46259,1733743228656] 2024-12-09T11:20:30,107 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T11:20:30,111 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T11:20:30,134 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T11:20:30,134 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T11:20:30,134 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T11:20:30,152 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.MemStoreFlusher(131): 
globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T11:20:30,158 INFO [RS:2;2dff3a36d44f:46367 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T11:20:30,158 INFO [RS:2;2dff3a36d44f:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,158 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T11:20:30,159 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T11:20:30,164 INFO [RS:1;2dff3a36d44f:39663 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T11:20:30,164 INFO [RS:1;2dff3a36d44f:39663 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,164 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T11:20:30,164 INFO [RS:0;2dff3a36d44f:46259 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T11:20:30,164 INFO [RS:0;2dff3a36d44f:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,166 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T11:20:30,166 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T11:20:30,170 INFO [RS:0;2dff3a36d44f:46259 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T11:20:30,170 INFO [RS:2;2dff3a36d44f:46367 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T11:20:30,170 INFO [RS:1;2dff3a36d44f:39663 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T11:20:30,172 INFO [RS:2;2dff3a36d44f:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,172 INFO [RS:1;2dff3a36d44f:39663 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,172 INFO [RS:0;2dff3a36d44f:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
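The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. the slice of the region server heap reserved for all memstores and the point at which flush pressure eases (836/880 is roughly the 0.95 lower-limit fraction). A minimal sketch of adjusting those fractions follows; the hbase.regionserver.global.memstore.size* property names are the usual documented keys, offered here as an assumption to check against the running version.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the region server heap usable by all memstores combined.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Lower mark as a fraction of the limit above; flushing eases once usage drops below it.
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    System.out.println(conf.getFloat("hbase.regionserver.global.memstore.size", -1f));
  }
}
```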
2024-12-09T11:20:30,172 DEBUG [RS:1;2dff3a36d44f:39663 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,172 DEBUG [RS:2;2dff3a36d44f:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,172 DEBUG [RS:0;2dff3a36d44f:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,172 DEBUG [RS:1;2dff3a36d44f:39663 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,172 DEBUG [RS:2;2dff3a36d44f:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,172 DEBUG [RS:1;2dff3a36d44f:39663 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,173 DEBUG [RS:0;2dff3a36d44f:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,173 DEBUG [RS:1;2dff3a36d44f:39663 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,173 DEBUG [RS:2;2dff3a36d44f:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,173 DEBUG [RS:1;2dff3a36d44f:39663 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,173 DEBUG [RS:0;2dff3a36d44f:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,173 DEBUG [RS:2;2dff3a36d44f:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,173 DEBUG [RS:1;2dff3a36d44f:39663 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T11:20:30,173 DEBUG [RS:0;2dff3a36d44f:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,173 DEBUG [RS:2;2dff3a36d44f:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,173 DEBUG [RS:1;2dff3a36d44f:39663 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,173 DEBUG [RS:0;2dff3a36d44f:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,173 DEBUG [RS:2;2dff3a36d44f:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0, corePoolSize=2, maxPoolSize=2 
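The executor.ExecutorService entries above and below show each region server spinning up named worker pools (RS_OPEN_REGION, RS_CLOSE_REGION, RS_LOG_REPLAY_OPS, and so on) with fixed corePoolSize/maxPoolSize values. The sketch below reproduces that shape with a plain java.util.concurrent pool rather than HBase's own ExecutorService wrapper; the names and sizes are copied from the log purely for illustration.

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class NamedPoolSketch {
  // Build a pool mirroring one corePoolSize/maxPoolSize pair from the log.
  static ThreadPoolExecutor namedPool(String name, int core, int max) {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        core, max, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
        r -> new Thread(r, name + "-worker"));
    System.out.printf("Starting executor service name=%s, corePoolSize=%d, maxPoolSize=%d%n",
        name, core, max);
    return pool;
  }

  public static void main(String[] args) {
    ThreadPoolExecutor openRegion = namedPool("RS_OPEN_REGION", 1, 1);
    ThreadPoolExecutor logReplay = namedPool("RS_LOG_REPLAY_OPS", 2, 2);
    openRegion.shutdown();
    logReplay.shutdown();
  }
}
```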
2024-12-09T11:20:30,173 DEBUG [RS:0;2dff3a36d44f:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T11:20:30,173 DEBUG [RS:1;2dff3a36d44f:39663 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,173 DEBUG [RS:0;2dff3a36d44f:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,173 DEBUG [RS:2;2dff3a36d44f:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,173 DEBUG [RS:1;2dff3a36d44f:39663 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,173 DEBUG [RS:0;2dff3a36d44f:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,174 DEBUG [RS:1;2dff3a36d44f:39663 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,174 DEBUG [RS:0;2dff3a36d44f:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,174 DEBUG [RS:2;2dff3a36d44f:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,174 DEBUG [RS:1;2dff3a36d44f:39663 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,174 DEBUG [RS:0;2dff3a36d44f:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,174 DEBUG [RS:2;2dff3a36d44f:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,174 DEBUG [RS:0;2dff3a36d44f:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,174 DEBUG [RS:1;2dff3a36d44f:39663 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,174 DEBUG [RS:2;2dff3a36d44f:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,174 DEBUG [RS:0;2dff3a36d44f:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,174 DEBUG [RS:1;2dff3a36d44f:39663 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:20:30,174 DEBUG [RS:2;2dff3a36d44f:46367 {}] 
executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,174 DEBUG [RS:0;2dff3a36d44f:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:20:30,174 DEBUG [RS:1;2dff3a36d44f:39663 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:20:30,174 DEBUG [RS:0;2dff3a36d44f:46259 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:20:30,174 DEBUG [RS:2;2dff3a36d44f:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:20:30,174 DEBUG [RS:2;2dff3a36d44f:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:20:30,175 DEBUG [RS:2;2dff3a36d44f:46367 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:20:30,175 INFO [RS:1;2dff3a36d44f:39663 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,176 INFO [RS:0;2dff3a36d44f:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,176 INFO [RS:1;2dff3a36d44f:39663 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,176 INFO [RS:2;2dff3a36d44f:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,176 INFO [RS:0;2dff3a36d44f:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,176 INFO [RS:1;2dff3a36d44f:39663 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,176 INFO [RS:2;2dff3a36d44f:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,176 INFO [RS:0;2dff3a36d44f:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,176 INFO [RS:1;2dff3a36d44f:39663 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,176 INFO [RS:2;2dff3a36d44f:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,176 INFO [RS:0;2dff3a36d44f:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,176 INFO [RS:1;2dff3a36d44f:39663 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
2024-12-09T11:20:30,176 INFO [RS:0;2dff3a36d44f:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,176 INFO [RS:2;2dff3a36d44f:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,176 INFO [RS:1;2dff3a36d44f:39663 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,39663,1733743228795-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T11:20:30,176 INFO [RS:2;2dff3a36d44f:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,176 INFO [RS:0;2dff3a36d44f:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,46259,1733743228656-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T11:20:30,176 INFO [RS:2;2dff3a36d44f:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,46367,1733743228871-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T11:20:30,203 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T11:20:30,203 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T11:20:30,203 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T11:20:30,205 INFO [RS:0;2dff3a36d44f:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,46259,1733743228656-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,205 INFO [RS:1;2dff3a36d44f:39663 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,39663,1733743228795-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,205 INFO [RS:2;2dff3a36d44f:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,46367,1733743228871-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,206 INFO [RS:2;2dff3a36d44f:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,206 INFO [RS:1;2dff3a36d44f:39663 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,206 INFO [RS:0;2dff3a36d44f:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,206 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.Replication(171): 2dff3a36d44f,46367,1733743228871 started 2024-12-09T11:20:30,206 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.Replication(171): 2dff3a36d44f,39663,1733743228795 started 2024-12-09T11:20:30,206 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.Replication(171): 2dff3a36d44f,46259,1733743228656 started 2024-12-09T11:20:30,238 INFO [RS:2;2dff3a36d44f:46367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:30,238 INFO [RS:0;2dff3a36d44f:46259 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T11:20:30,239 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.HRegionServer(1482): Serving as 2dff3a36d44f,46367,1733743228871, RpcServer on 2dff3a36d44f/172.17.0.3:46367, sessionid=0x1012ae9bf670003 2024-12-09T11:20:30,239 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.HRegionServer(1482): Serving as 2dff3a36d44f,46259,1733743228656, RpcServer on 2dff3a36d44f/172.17.0.3:46259, sessionid=0x1012ae9bf670001 2024-12-09T11:20:30,240 DEBUG [RS:2;2dff3a36d44f:46367 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T11:20:30,240 DEBUG [RS:0;2dff3a36d44f:46259 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T11:20:30,240 DEBUG [RS:2;2dff3a36d44f:46367 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2dff3a36d44f,46367,1733743228871 2024-12-09T11:20:30,240 DEBUG [RS:0;2dff3a36d44f:46259 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2dff3a36d44f,46259,1733743228656 2024-12-09T11:20:30,240 DEBUG [RS:2;2dff3a36d44f:46367 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,46367,1733743228871' 2024-12-09T11:20:30,240 DEBUG [RS:0;2dff3a36d44f:46259 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,46259,1733743228656' 2024-12-09T11:20:30,241 DEBUG [RS:2;2dff3a36d44f:46367 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T11:20:30,241 DEBUG [RS:0;2dff3a36d44f:46259 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T11:20:30,247 DEBUG [RS:0;2dff3a36d44f:46259 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T11:20:30,247 DEBUG [RS:2;2dff3a36d44f:46367 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T11:20:30,248 DEBUG [RS:2;2dff3a36d44f:46367 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T11:20:30,248 DEBUG [RS:2;2dff3a36d44f:46367 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T11:20:30,249 DEBUG [RS:2;2dff3a36d44f:46367 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2dff3a36d44f,46367,1733743228871 2024-12-09T11:20:30,249 DEBUG [RS:2;2dff3a36d44f:46367 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,46367,1733743228871' 2024-12-09T11:20:30,249 DEBUG [RS:2;2dff3a36d44f:46367 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T11:20:30,249 INFO [RS:1;2dff3a36d44f:39663 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
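The flush-table-proc and online-snapshot procedure members starting above are the ZooKeeper-coordinated endpoints that carry out client-initiated flushes and snapshots on each region server. Below is a minimal client-side sketch of the operations they back, assuming a reachable cluster and a hypothetical table named 'demo'.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndSnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("demo"); // hypothetical table
      admin.flush(table);                 // served by the flush-table-proc members above
      admin.snapshot("demo-snap", table); // served by the online-snapshot members above
    }
  }
}
```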
2024-12-09T11:20:30,249 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.HRegionServer(1482): Serving as 2dff3a36d44f,39663,1733743228795, RpcServer on 2dff3a36d44f/172.17.0.3:39663, sessionid=0x1012ae9bf670002 2024-12-09T11:20:30,250 DEBUG [RS:2;2dff3a36d44f:46367 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T11:20:30,250 DEBUG [RS:1;2dff3a36d44f:39663 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T11:20:30,250 DEBUG [RS:1;2dff3a36d44f:39663 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2dff3a36d44f,39663,1733743228795 2024-12-09T11:20:30,250 DEBUG [RS:1;2dff3a36d44f:39663 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,39663,1733743228795' 2024-12-09T11:20:30,250 DEBUG [RS:1;2dff3a36d44f:39663 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T11:20:30,250 DEBUG [RS:0;2dff3a36d44f:46259 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T11:20:30,250 DEBUG [RS:0;2dff3a36d44f:46259 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T11:20:30,250 DEBUG [RS:0;2dff3a36d44f:46259 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2dff3a36d44f,46259,1733743228656 2024-12-09T11:20:30,250 DEBUG [RS:1;2dff3a36d44f:39663 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T11:20:30,251 DEBUG [RS:0;2dff3a36d44f:46259 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,46259,1733743228656' 2024-12-09T11:20:30,251 DEBUG [RS:0;2dff3a36d44f:46259 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T11:20:30,251 DEBUG [RS:2;2dff3a36d44f:46367 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T11:20:30,251 INFO [RS:2;2dff3a36d44f:46367 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T11:20:30,251 INFO [RS:2;2dff3a36d44f:46367 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T11:20:30,252 DEBUG [RS:0;2dff3a36d44f:46259 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T11:20:30,253 DEBUG [RS:0;2dff3a36d44f:46259 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T11:20:30,253 INFO [RS:0;2dff3a36d44f:46259 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T11:20:30,253 INFO [RS:0;2dff3a36d44f:46259 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-09T11:20:30,254 DEBUG [RS:1;2dff3a36d44f:39663 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T11:20:30,254 DEBUG [RS:1;2dff3a36d44f:39663 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T11:20:30,254 DEBUG [RS:1;2dff3a36d44f:39663 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2dff3a36d44f,39663,1733743228795 2024-12-09T11:20:30,255 DEBUG [RS:1;2dff3a36d44f:39663 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,39663,1733743228795' 2024-12-09T11:20:30,255 DEBUG [RS:1;2dff3a36d44f:39663 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T11:20:30,256 DEBUG [RS:1;2dff3a36d44f:39663 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T11:20:30,256 DEBUG [RS:1;2dff3a36d44f:39663 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T11:20:30,257 INFO [RS:1;2dff3a36d44f:39663 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T11:20:30,257 INFO [RS:1;2dff3a36d44f:39663 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T11:20:30,262 WARN [2dff3a36d44f:42781 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T11:20:30,356 INFO [RS:0;2dff3a36d44f:46259 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T11:20:30,356 INFO [RS:2;2dff3a36d44f:46367 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T11:20:30,357 INFO [RS:1;2dff3a36d44f:39663 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T11:20:30,360 INFO [RS:0;2dff3a36d44f:46259 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C46259%2C1733743228656, suffix=, logDir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,46259,1733743228656, archiveDir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/oldWALs, maxLogs=32 2024-12-09T11:20:30,360 INFO [RS:2;2dff3a36d44f:46367 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C46367%2C1733743228871, suffix=, logDir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,46367,1733743228871, archiveDir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/oldWALs, maxLogs=32 2024-12-09T11:20:30,360 INFO [RS:1;2dff3a36d44f:39663 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C39663%2C1733743228795, suffix=, logDir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795, archiveDir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/oldWALs, maxLogs=32 2024-12-09T11:20:30,380 DEBUG [RS:0;2dff3a36d44f:46259 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,46259,1733743228656/2dff3a36d44f%2C46259%2C1733743228656.1733743230364, exclude list is [], retry=0 2024-12-09T11:20:30,380 DEBUG 
[RS:1;2dff3a36d44f:39663 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795/2dff3a36d44f%2C39663%2C1733743228795.1733743230365, exclude list is [], retry=0 2024-12-09T11:20:30,381 DEBUG [RS:2;2dff3a36d44f:46367 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,46367,1733743228871/2dff3a36d44f%2C46367%2C1733743228871.1733743230365, exclude list is [], retry=0 2024-12-09T11:20:30,383 WARN [IPC Server handler 0 on default port 40493 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:20:30,383 WARN [IPC Server handler 0 on default port 40493 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:20:30,383 WARN [IPC Server handler 0 on default port 40493 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:20:30,385 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:30,385 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:30,385 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:30,387 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:30,415 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, 
datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:30,415 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:30,415 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:30,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:30,420 INFO [RS:1;2dff3a36d44f:39663 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795/2dff3a36d44f%2C39663%2C1733743228795.1733743230365 2024-12-09T11:20:30,420 DEBUG [RS:1;2dff3a36d44f:39663 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39935:39935),(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39985:39985)] 2024-12-09T11:20:30,423 INFO [RS:0;2dff3a36d44f:46259 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,46259,1733743228656/2dff3a36d44f%2C46259%2C1733743228656.1733743230364 2024-12-09T11:20:30,423 INFO [RS:2;2dff3a36d44f:46367 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,46367,1733743228871/2dff3a36d44f%2C46367%2C1733743228871.1733743230365 2024-12-09T11:20:30,423 DEBUG [RS:0;2dff3a36d44f:46259 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:43581:43581)] 2024-12-09T11:20:30,424 DEBUG [RS:2;2dff3a36d44f:46367 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39935:39935),(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:43581:43581)] 2024-12-09T11:20:30,515 DEBUG [2dff3a36d44f:42781 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-09T11:20:30,523 DEBUG [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(204): Hosts are {2dff3a36d44f=0} racks are {/default-rack=0} 2024-12-09T11:20:30,530 DEBUG [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T11:20:30,530 DEBUG [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T11:20:30,530 DEBUG [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T11:20:30,530 DEBUG [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T11:20:30,530 DEBUG [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T11:20:30,530 DEBUG [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T11:20:30,530 INFO [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 
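The wal.AbstractFSWAL(613) entries above print the effective WAL sizing for each region server (blocksize=256 MB, rollsize=128 MB, maxLogs=32). A minimal sketch of the configuration that would typically produce those values; the property names are assumptions, and the roll size is derived as the block size times the roll multiplier.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class WalSizingSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed property names, chosen to mirror the values logged above.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize=256 MB
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // rollsize = 0.5 * blocksize = 128 MB
        conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs=32
      }
    }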
2024-12-09T11:20:30,530 INFO [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T11:20:30,530 INFO [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T11:20:30,530 DEBUG [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T11:20:30,538 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2dff3a36d44f,39663,1733743228795 2024-12-09T11:20:30,544 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2dff3a36d44f,39663,1733743228795, state=OPENING 2024-12-09T11:20:30,549 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T11:20:30,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:30,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39663-0x1012ae9bf670002, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:30,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:30,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:20:30,551 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:20:30,551 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:20:30,552 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:20:30,552 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:20:30,553 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T11:20:30,556 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2dff3a36d44f,39663,1733743228795}] 2024-12-09T11:20:30,735 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T11:20:30,737 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59417, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T11:20:30,751 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, 
pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T11:20:30,752 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:30,752 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-09T11:20:30,756 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C39663%2C1733743228795.meta, suffix=.meta, logDir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795, archiveDir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/oldWALs, maxLogs=32 2024-12-09T11:20:30,774 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795/2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta, exclude list is [], retry=0 2024-12-09T11:20:30,777 WARN [IPC Server handler 2 on default port 40493 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:20:30,777 WARN [IPC Server handler 2 on default port 40493 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:20:30,777 WARN [IPC Server handler 2 on default port 40493 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:20:30,779 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:30,779 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:30,782 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795/2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta 2024-12-09T11:20:30,782 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:43581:43581)] 2024-12-09T11:20:30,783 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:20:30,784 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T11:20:30,787 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T11:20:30,792 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T11:20:30,796 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T11:20:30,797 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:30,797 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T11:20:30,797 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T11:20:30,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T11:20:30,802 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T11:20:30,802 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:30,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:20:30,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T11:20:30,805 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T11:20:30,805 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:30,806 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:20:30,806 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T11:20:30,808 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T11:20:30,808 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:30,808 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:20:30,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T11:20:30,810 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T11:20:30,810 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:30,811 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:20:30,811 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T11:20:30,812 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740 2024-12-09T11:20:30,815 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740 2024-12-09T11:20:30,818 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T11:20:30,818 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T11:20:30,822 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
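The FlushLargeStoresPolicy(65) entry above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta table descriptor, so the policy falls back to the memstore flush size divided by the number of families (32.0 M). A hedged sketch of carrying that key on a table descriptor; the table name is hypothetical and the value simply mirrors the 32 MB fallback.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class FlushLowerBoundSketch {
      public static void main(String[] args) {
        // Hypothetical table; the key is the one named by FlushLargeStoresPolicy above.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("exampleTable"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(32L * 1024 * 1024))
            .build();
        System.out.println(td);
      }
    }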
2024-12-09T11:20:30,825 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T11:20:30,828 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67841577, jitterRate=0.010918274521827698}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T11:20:30,828 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T11:20:30,830 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733743230797Writing region info on filesystem at 1733743230797Initializing all the Stores at 1733743230800 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743230800Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743230800Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743230800Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743230800Cleaning up temporary data from old regions at 1733743230818 (+18 ms)Running coprocessor post-open hooks at 1733743230828 (+10 ms)Region opened successfully at 1733743230830 (+2 ms) 2024-12-09T11:20:30,839 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733743230725 2024-12-09T11:20:30,855 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T11:20:30,855 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T11:20:30,858 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=2dff3a36d44f,39663,1733743228795 2024-12-09T11:20:30,861 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2dff3a36d44f,39663,1733743228795, state=OPEN 2024-12-09T11:20:30,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:20:30,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:20:30,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39663-0x1012ae9bf670002, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:20:30,865 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:20:30,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:20:30,865 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:20:30,865 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:20:30,865 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:20:30,866 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2dff3a36d44f,39663,1733743228795 2024-12-09T11:20:30,872 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T11:20:30,872 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2dff3a36d44f,39663,1733743228795 in 311 msec 2024-12-09T11:20:30,881 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T11:20:30,881 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 778 msec 2024-12-09T11:20:30,883 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:20:30,883 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T11:20:30,910 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:20:30,911 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1] 2024-12-09T11:20:30,936 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:20:30,939 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58053, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:20:30,962 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1450 sec 2024-12-09T11:20:30,962 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733743230962, completionTime=-1 2024-12-09T11:20:30,965 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-09T11:20:30,965 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T11:20:30,994 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-09T11:20:30,994 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733743290994 2024-12-09T11:20:30,994 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733743350994 2024-12-09T11:20:30,994 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 29 msec 2024-12-09T11:20:30,995 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-09T11:20:31,002 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,42781,1733743227566-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:31,002 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,42781,1733743227566-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:31,002 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,42781,1733743227566-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:31,003 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2dff3a36d44f:42781, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:31,004 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:31,004 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-12-09T11:20:31,011 DEBUG [master/2dff3a36d44f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T11:20:31,063 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.097sec 2024-12-09T11:20:31,065 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T11:20:31,067 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T11:20:31,068 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T11:20:31,068 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T11:20:31,069 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T11:20:31,070 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,42781,1733743227566-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T11:20:31,070 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,42781,1733743227566-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T11:20:31,072 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:20:31,072 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:20:31,072 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:20:31,072 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:20:31,073 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to 
place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:20:31,073 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:20:31,073 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:20:31,073 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:20:31,080 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T11:20:31,081 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T11:20:31,081 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,42781,1733743227566-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
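The entries that follow show the test client fetching the cluster id and the hbase:meta location once the mini cluster is up. As a hedged illustration of the client side only, a connection is typically obtained through ConnectionFactory; the configuration must point at the running cluster and the table name used here is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public final class ClientConnectionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumed to point at the running cluster
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("exampleTable"))) {
          // Region locations (hbase:meta first) are resolved lazily, as the
          // ClusterIdFetcher and ConnectionUtils entries below illustrate.
          System.out.println(table.getName());
        }
      }
    }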
2024-12-09T11:20:31,115 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e4c2dcd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:20:31,116 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2dff3a36d44f,42781,-1 for getting cluster id 2024-12-09T11:20:31,119 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:20:31,127 DEBUG [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ed218c8a-0bff-4a39-941d-68ae72b73aae' 2024-12-09T11:20:31,129 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:20:31,129 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ed218c8a-0bff-4a39-941d-68ae72b73aae" 2024-12-09T11:20:31,129 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3352a856, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:20:31,130 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2dff3a36d44f,42781,-1] 2024-12-09T11:20:31,132 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:20:31,134 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:20:31,135 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51030, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:20:31,137 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fea663b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:20:31,138 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:20:31,144 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1] 2024-12-09T11:20:31,144 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:20:31,147 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45782, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:20:31,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2dff3a36d44f,42781,1733743227566 2024-12-09T11:20:31,168 INFO [Time-limited test {}] wal.AbstractTestWALReplay(147): hbase.rootdir=hdfs://localhost:40493/hbase 2024-12-09T11:20:31,184 INFO [Time-limited test 
{}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit0 Thread=359, OpenFileDescriptor=599, MaxFileDescriptor=1048576, SystemLoadAverage=335, ProcessCount=11, AvailableMemoryMB=1380 2024-12-09T11:20:31,212 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:31,216 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:31,217 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T11:20:31,222 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-41833951, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/hregion-41833951, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:20:31,246 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-41833951/hregion-41833951.1733743231224, exclude list is [], retry=0 2024-12-09T11:20:31,249 WARN [IPC Server handler 0 on default port 40493 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:20:31,249 WARN [IPC Server handler 0 on default port 40493 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:20:31,249 WARN [IPC Server handler 0 on default port 40493 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:20:31,251 DEBUG [AsyncFSWAL-8-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:31,251 DEBUG [AsyncFSWAL-8-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:31,254 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-41833951/hregion-41833951.1733743231224 2024-12-09T11:20:31,255 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39985:39985)] 2024-12-09T11:20:31,255 INFO 
[Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 680c89b17c03923635f5972d35d2fb93, NAME => 'testReplayEditsWrittenIntoWAL,,1733743231213.680c89b17c03923635f5972d35d2fb93.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40493/hbase 2024-12-09T11:20:31,260 WARN [IPC Server handler 1 on default port 40493 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:20:31,260 WARN [IPC Server handler 1 on default port 40493 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:20:31,260 WARN [IPC Server handler 1 on default port 40493 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:20:31,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741838_1014 (size=64) 2024-12-09T11:20:31,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741838_1014 (size=64) 2024-12-09T11:20:31,268 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733743231213.680c89b17c03923635f5972d35d2fb93.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:31,270 INFO [StoreOpener-680c89b17c03923635f5972d35d2fb93-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 680c89b17c03923635f5972d35d2fb93 2024-12-09T11:20:31,272 INFO [StoreOpener-680c89b17c03923635f5972d35d2fb93-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction 
policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 680c89b17c03923635f5972d35d2fb93 columnFamilyName a 2024-12-09T11:20:31,273 DEBUG [StoreOpener-680c89b17c03923635f5972d35d2fb93-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:31,273 INFO [StoreOpener-680c89b17c03923635f5972d35d2fb93-1 {}] regionserver.HStore(327): Store=680c89b17c03923635f5972d35d2fb93/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:31,274 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 680c89b17c03923635f5972d35d2fb93 2024-12-09T11:20:31,275 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93 2024-12-09T11:20:31,276 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93 2024-12-09T11:20:31,276 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 680c89b17c03923635f5972d35d2fb93 2024-12-09T11:20:31,276 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 680c89b17c03923635f5972d35d2fb93 2024-12-09T11:20:31,280 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 680c89b17c03923635f5972d35d2fb93 2024-12-09T11:20:31,287 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:20:31,289 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 680c89b17c03923635f5972d35d2fb93; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68641056, jitterRate=0.022831439971923828}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:20:31,291 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 680c89b17c03923635f5972d35d2fb93: Writing region info on filesystem at 1733743231268Initializing all the Stores at 1733743231270 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743231270Cleaning up temporary data from old regions at 1733743231276 (+6 ms)Region opened successfully at 1733743231290 (+14 ms) 2024-12-09T11:20:31,291 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 680c89b17c03923635f5972d35d2fb93, disabling compactions & flushes 2024-12-09T11:20:31,291 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733743231213.680c89b17c03923635f5972d35d2fb93. 
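The regionserver.HRegion(7572) entry above shows the test creating region 680c89b17c03923635f5972d35d2fb93 for table testReplayEditsWrittenIntoWAL with a single column family 'a' limited to one version. A minimal sketch of a table descriptor with that shape; this is illustrative and not the test's actual code.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class ReplayTableSketch {
      public static void main(String[] args) {
        // Mirrors the descriptor logged above: one family 'a', VERSIONS => '1'.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testReplayEditsWrittenIntoWAL"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("a"))
                .setMaxVersions(1)
                .build())
            .build();
        System.out.println(td);
      }
    }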
2024-12-09T11:20:31,291 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733743231213.680c89b17c03923635f5972d35d2fb93. 2024-12-09T11:20:31,291 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733743231213.680c89b17c03923635f5972d35d2fb93. after waiting 0 ms 2024-12-09T11:20:31,291 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733743231213.680c89b17c03923635f5972d35d2fb93. 2024-12-09T11:20:31,292 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733743231213.680c89b17c03923635f5972d35d2fb93. 2024-12-09T11:20:31,292 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 680c89b17c03923635f5972d35d2fb93: Waiting for close lock at 1733743231291Disabling compacts and flushes for region at 1733743231291Disabling writes for close at 1733743231291Writing region close event to WAL at 1733743231292 (+1 ms)Closed at 1733743231292 2024-12-09T11:20:31,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741837_1013 (size=95) 2024-12-09T11:20:31,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741837_1013 (size=95) 2024-12-09T11:20:31,309 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-09T11:20:31,310 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-41833951:(num 1733743231224) 2024-12-09T11:20:31,312 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-09T11:20:31,319 WARN [IPC Server handler 4 on default port 40493 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:20:31,320 WARN [IPC Server handler 4 on default port 40493 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:20:31,320 WARN [IPC Server handler 4 on default port 40493 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:20:31,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741839_1015 (size=320) 2024-12-09T11:20:31,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:44093 is added to blk_1073741839_1015 (size=320) 2024-12-09T11:20:31,330 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-09T11:20:31,334 WARN [IPC Server handler 0 on default port 40493 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:20:31,334 WARN [IPC Server handler 0 on default port 40493 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:20:31,334 WARN [IPC Server handler 0 on default port 40493 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:20:31,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741840_1016 (size=253) 2024-12-09T11:20:31,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741840_1016 (size=253) 2024-12-09T11:20:31,774 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-1, size=320 (320bytes) 2024-12-09T11:20:31,776 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-09T11:20:31,776 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-09T11:20:31,776 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-1 2024-12-09T11:20:31,783 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-1 after 5ms 2024-12-09T11:20:31,792 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-1: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:31,793 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-1 took 20ms 2024-12-09T11:20:31,811 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-1 
so closing down 2024-12-09T11:20:31,812 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-09T11:20:31,815 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-12-09T11:20:31,817 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/recovered.edits/0000000000000000001-wal-1.temp 2024-12-09T11:20:31,818 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-09T11:20:31,820 WARN [IPC Server handler 3 on default port 40493 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:20:31,820 WARN [IPC Server handler 3 on default port 40493 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:20:31,820 WARN [IPC Server handler 3 on default port 40493 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:20:31,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741841_1017 (size=320) 2024-12-09T11:20:31,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741841_1017 (size=320) 2024-12-09T11:20:31,837 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-09T11:20:31,841 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/recovered.edits/0000000000000000002 2024-12-09T11:20:31,845 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 41 ms; skipped=0; WAL=hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-1, size=320, length=320, corrupted=false, cancelled=false 2024-12-09T11:20:31,845 DEBUG [Time-limited test {}] 
wal.WALSplitter(428): Completed split of hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-1, journal: Splitting hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-1, size=320 (320bytes) at 1733743231774Finishing writing output for hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-1 so closing down at 1733743231812 (+38 ms)Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/recovered.edits/0000000000000000001-wal-1.temp at 1733743231817 (+5 ms)3 split writer threads finished at 1733743231818 (+1 ms)Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1733743231837 (+19 ms)Rename recovered edits hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/recovered.edits/0000000000000000002 at 1733743231841 (+4 ms)Processed 2 edits across 1 Regions in 41 ms; skipped=0; WAL=hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-1, size=320, length=320, corrupted=false, cancelled=false at 1733743231845 (+4 ms) 2024-12-09T11:20:31,862 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-2, size=253 (253bytes) 2024-12-09T11:20:31,862 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-2 2024-12-09T11:20:31,864 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-2 after 1ms 2024-12-09T11:20:31,868 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-2: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:31,868 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-2 took 6ms 2024-12-09T11:20:31,871 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-2 so closing down 2024-12-09T11:20:31,871 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-09T11:20:31,875 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-12-09T11:20:31,877 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/recovered.edits/0000000000000000002-wal-2.temp 2024-12-09T11:20:31,877 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 
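The wal-1 split above shows the recovered-edits naming pattern directly: the output sink creates a temp file named with the first sequence id plus the source WAL name (0000000000000000001-wal-1.temp) and, on close, renames it to the highest sequence id it wrote (0000000000000000002); the later replay line reports firstSequenceIdInLog=1, maxSequenceIdInLog=2 for that file. The snippet below is a simplified illustration of that naming, not HBase's implementation; the 19-digit zero padding is read off the file names in this log.

public class RecoveredEditsNames {
    // Simplified illustration of the naming visible in this log; not HBase code.
    // Temp file: zero-padded first sequence id + "-" + source WAL name + ".temp"
    static String tempName(long firstSeqId, String walName) {
        return String.format("%019d-%s.temp", firstSeqId, walName);
    }

    // Final file: zero-padded highest sequence id written by the split
    static String finalName(long maxSeqId) {
        return String.format("%019d", maxSeqId);
    }

    public static void main(String[] args) {
        System.out.println(tempName(1, "wal-1")); // 0000000000000000001-wal-1.temp
        System.out.println(finalName(2));         // 0000000000000000002
    }
}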
2024-12-09T11:20:31,879 WARN [IPC Server handler 4 on default port 40493 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:20:31,879 WARN [IPC Server handler 4 on default port 40493 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:20:31,879 WARN [IPC Server handler 4 on default port 40493 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:20:31,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741842_1018 (size=253) 2024-12-09T11:20:31,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741842_1018 (size=253) 2024-12-09T11:20:31,890 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-09T11:20:31,896 DEBUG [split-log-closeStream-pool-0 {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:31,898 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(184): Found existing old edits file and we have less entries. 
Deleting hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/recovered.edits/0000000000000000002-wal-2.temp, length=253 2024-12-09T11:20:31,900 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 32 ms; skipped=0; WAL=hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-2, size=253, length=253, corrupted=false, cancelled=false 2024-12-09T11:20:31,900 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-2, journal: Splitting hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-2, size=253 (253bytes) at 1733743231862Finishing writing output for hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-2 so closing down at 1733743231871 (+9 ms)Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/recovered.edits/0000000000000000002-wal-2.temp at 1733743231877 (+6 ms)3 split writer threads finished at 1733743231877Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733743231890 (+13 ms)Processed 1 edits across 1 Regions in 32 ms; skipped=0; WAL=hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal-2, size=253, length=253, corrupted=false, cancelled=false at 1733743231900 (+10 ms) 2024-12-09T11:20:31,901 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-09T11:20:31,903 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:20:31,926 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal.1733743231905, exclude list is [], retry=0 2024-12-09T11:20:31,931 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:31,932 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:31,932 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:31,937 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal.1733743231905 2024-12-09T11:20:31,938 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:39935:39935),(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:43581:43581)] 2024-12-09T11:20:31,938 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 680c89b17c03923635f5972d35d2fb93, NAME => 'testReplayEditsWrittenIntoWAL,,1733743231213.680c89b17c03923635f5972d35d2fb93.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:20:31,938 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733743231213.680c89b17c03923635f5972d35d2fb93.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:31,938 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 680c89b17c03923635f5972d35d2fb93 2024-12-09T11:20:31,938 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 680c89b17c03923635f5972d35d2fb93 2024-12-09T11:20:31,941 INFO [StoreOpener-680c89b17c03923635f5972d35d2fb93-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 680c89b17c03923635f5972d35d2fb93 2024-12-09T11:20:31,943 INFO [StoreOpener-680c89b17c03923635f5972d35d2fb93-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 680c89b17c03923635f5972d35d2fb93 columnFamilyName a 2024-12-09T11:20:31,943 DEBUG [StoreOpener-680c89b17c03923635f5972d35d2fb93-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:31,944 INFO [StoreOpener-680c89b17c03923635f5972d35d2fb93-1 {}] regionserver.HStore(327): Store=680c89b17c03923635f5972d35d2fb93/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:31,944 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 680c89b17c03923635f5972d35d2fb93 2024-12-09T11:20:31,945 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93 2024-12-09T11:20:31,953 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93 2024-12-09T11:20:31,954 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/recovered.edits/0000000000000000002 2024-12-09T11:20:31,959 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing 
compression context for hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:31,967 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/recovered.edits/0000000000000000002 2024-12-09T11:20:31,970 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 680c89b17c03923635f5972d35d2fb93 1/1 column families, dataSize=108 B heapSize=512 B 2024-12-09T11:20:32,022 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/.tmp/a/8d5c8837a5c3494e94fbb53cebb9226b is 58, key is testReplayEditsWrittenIntoWAL/a:1/1733743231310/Put/seqid=0 2024-12-09T11:20:32,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741844_1020 (size=5170) 2024-12-09T11:20:32,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741844_1020 (size=5170) 2024-12-09T11:20:32,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741844_1020 (size=5170) 2024-12-09T11:20:32,037 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/.tmp/a/8d5c8837a5c3494e94fbb53cebb9226b 2024-12-09T11:20:32,091 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/.tmp/a/8d5c8837a5c3494e94fbb53cebb9226b as hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/a/8d5c8837a5c3494e94fbb53cebb9226b 2024-12-09T11:20:32,102 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/a/8d5c8837a5c3494e94fbb53cebb9226b, entries=2, sequenceid=2, filesize=5.0 K 2024-12-09T11:20:32,109 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 680c89b17c03923635f5972d35d2fb93 in 137ms, sequenceid=2, compaction requested=false; wal=null 2024-12-09T11:20:32,111 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/recovered.edits/0000000000000000002 2024-12-09T11:20:32,112 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 680c89b17c03923635f5972d35d2fb93 2024-12-09T11:20:32,112 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 680c89b17c03923635f5972d35d2fb93 2024-12-09T11:20:32,116 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 680c89b17c03923635f5972d35d2fb93 2024-12-09T11:20:32,119 DEBUG [Time-limited test {}] 
wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/680c89b17c03923635f5972d35d2fb93/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1 2024-12-09T11:20:32,121 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 680c89b17c03923635f5972d35d2fb93; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63386284, jitterRate=-0.05547076463699341}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:20:32,122 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 680c89b17c03923635f5972d35d2fb93: Writing region info on filesystem at 1733743231938Initializing all the Stores at 1733743231940 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743231941 (+1 ms)Obtaining lock to block concurrent updates at 1733743231970 (+29 ms)Preparing flush snapshotting stores in 680c89b17c03923635f5972d35d2fb93 at 1733743231970Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1733743231213.680c89b17c03923635f5972d35d2fb93., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1733743231973 (+3 ms)Flushing stores of testReplayEditsWrittenIntoWAL,,1733743231213.680c89b17c03923635f5972d35d2fb93. at 1733743231973Flushing 680c89b17c03923635f5972d35d2fb93/a: creating writer at 1733743231975 (+2 ms)Flushing 680c89b17c03923635f5972d35d2fb93/a: appending metadata at 1733743232012 (+37 ms)Flushing 680c89b17c03923635f5972d35d2fb93/a: closing flushed file at 1733743232015 (+3 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@661465e3: reopening flushed file at 1733743232089 (+74 ms)Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 680c89b17c03923635f5972d35d2fb93 in 137ms, sequenceid=2, compaction requested=false; wal=null at 1733743232109 (+20 ms)Cleaning up temporary data from old regions at 1733743232112 (+3 ms)Region opened successfully at 1733743232121 (+9 ms) 2024-12-09T11:20:32,155 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit0 Thread=369 (was 359) Potentially hanging thread: PacketResponder: BP-32692473-172.17.0.3-1733743223895:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-32692473-172.17.0.3-1733743223895:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:60572 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:58566 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:40493/hbase-prefix:default java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: TestAsyncWALReplay-pool-0 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:60462 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:35860 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-32692473-172.17.0.3-1733743223895:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:35932 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=691 (was 599) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=340 (was 335) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=1343 (was 1380) 2024-12-09T11:20:32,167 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit1 Thread=369, OpenFileDescriptor=691, MaxFileDescriptor=1048576, SystemLoadAverage=340, ProcessCount=11, AvailableMemoryMB=1341 2024-12-09T11:20:32,183 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:32,185 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:32,186 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T11:20:32,190 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-67615967, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/hregion-67615967, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:20:32,204 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-67615967/hregion-67615967.1733743232190, exclude list is [], retry=0 2024-12-09T11:20:32,208 DEBUG [AsyncFSWAL-10-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:32,208 DEBUG [AsyncFSWAL-10-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:32,209 DEBUG [AsyncFSWAL-10-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:32,212 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-67615967/hregion-67615967.1733743232190 2024-12-09T11:20:32,212 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39935:39935)] 2024-12-09T11:20:32,213 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => ff20ec44b323bc84e4c317acb7d1b004, NAME => 'testReplayEditsWrittenIntoWAL,,1733743232184.ff20ec44b323bc84e4c317acb7d1b004.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40493/hbase 2024-12-09T11:20:32,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741846_1022 (size=64) 2024-12-09T11:20:32,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741846_1022 (size=64) 
2024-12-09T11:20:32,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741846_1022 (size=64) 2024-12-09T11:20:32,227 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733743232184.ff20ec44b323bc84e4c317acb7d1b004.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:32,228 INFO [StoreOpener-ff20ec44b323bc84e4c317acb7d1b004-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region ff20ec44b323bc84e4c317acb7d1b004 2024-12-09T11:20:32,231 INFO [StoreOpener-ff20ec44b323bc84e4c317acb7d1b004-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ff20ec44b323bc84e4c317acb7d1b004 columnFamilyName a 2024-12-09T11:20:32,231 DEBUG [StoreOpener-ff20ec44b323bc84e4c317acb7d1b004-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:32,232 INFO [StoreOpener-ff20ec44b323bc84e4c317acb7d1b004-1 {}] regionserver.HStore(327): Store=ff20ec44b323bc84e4c317acb7d1b004/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:32,232 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for ff20ec44b323bc84e4c317acb7d1b004 2024-12-09T11:20:32,233 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004 2024-12-09T11:20:32,233 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004 2024-12-09T11:20:32,234 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for ff20ec44b323bc84e4c317acb7d1b004 2024-12-09T11:20:32,234 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for ff20ec44b323bc84e4c317acb7d1b004 2024-12-09T11:20:32,237 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for ff20ec44b323bc84e4c317acb7d1b004 2024-12-09T11:20:32,241 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:20:32,242 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened ff20ec44b323bc84e4c317acb7d1b004; next sequenceid=2; 
SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75416224, jitterRate=0.12378931045532227}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:20:32,242 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for ff20ec44b323bc84e4c317acb7d1b004: Writing region info on filesystem at 1733743232227Initializing all the Stores at 1733743232228 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743232228Cleaning up temporary data from old regions at 1733743232234 (+6 ms)Region opened successfully at 1733743232242 (+8 ms) 2024-12-09T11:20:32,243 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing ff20ec44b323bc84e4c317acb7d1b004, disabling compactions & flushes 2024-12-09T11:20:32,243 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733743232184.ff20ec44b323bc84e4c317acb7d1b004. 2024-12-09T11:20:32,243 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733743232184.ff20ec44b323bc84e4c317acb7d1b004. 2024-12-09T11:20:32,243 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733743232184.ff20ec44b323bc84e4c317acb7d1b004. after waiting 0 ms 2024-12-09T11:20:32,243 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733743232184.ff20ec44b323bc84e4c317acb7d1b004. 2024-12-09T11:20:32,243 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733743232184.ff20ec44b323bc84e4c317acb7d1b004. 
2024-12-09T11:20:32,243 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for ff20ec44b323bc84e4c317acb7d1b004: Waiting for close lock at 1733743232242Disabling compacts and flushes for region at 1733743232242Disabling writes for close at 1733743232243 (+1 ms)Writing region close event to WAL at 1733743232243Closed at 1733743232243 2024-12-09T11:20:32,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741845_1021 (size=95) 2024-12-09T11:20:32,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741845_1021 (size=95) 2024-12-09T11:20:32,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741845_1021 (size=95) 2024-12-09T11:20:32,252 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-09T11:20:32,252 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-67615967:(num 1733743232190) 2024-12-09T11:20:32,253 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-09T11:20:32,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741847_1023 (size=320) 2024-12-09T11:20:32,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741847_1023 (size=320) 2024-12-09T11:20:32,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741847_1023 (size=320) 2024-12-09T11:20:32,269 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-09T11:20:32,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741848_1024 (size=253) 2024-12-09T11:20:32,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741848_1024 (size=253) 2024-12-09T11:20:32,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741848_1024 (size=253) 2024-12-09T11:20:32,299 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-2, size=253 (253bytes) 2024-12-09T11:20:32,299 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-2 2024-12-09T11:20:32,300 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-2 after 1ms 2024-12-09T11:20:32,303 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-2: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:32,304 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-2 took 5ms 
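The splits that follow replay the same wal-1/wal-2 name conflict as the first test, but with wal-2 split first: when wal-1's split later finds the existing recovered.edits/0000000000000000002 (one edit, 253 bytes), the WARN further down reports it as a possible leftover of a failed split or duplicated entries, deletes it, and renames wal-1's temp (two edits, 320 bytes) over it. Together with the first test, where the one-edit temp was the file discarded, the behaviour visible here amounts to keeping whichever recovered-edits file covers more edits. The sketch below expresses that rule as inferred from these log messages; it is not taken from HBase source.

public class RecoveredEditsConflict {
    // Conflict rule as suggested by the WARN messages in this log (inferred, not HBase code):
    // when a recovered-edits file for the same highest sequence id already exists,
    // keep whichever side carries more edits and delete the other.
    record EditsFile(String path, int editCount) {}

    static EditsFile resolve(EditsFile existing, EditsFile newlyWritten) {
        return newlyWritten.editCount() > existing.editCount() ? newlyWritten : existing;
    }

    public static void main(String[] args) {
        EditsFile fromWal2 = new EditsFile("recovered.edits/0000000000000000002", 1);
        EditsFile fromWal1 = new EditsFile("recovered.edits/0000000000000000001-wal-1.temp", 2);
        // wal-1's output carries more edits, so the existing file is replaced, as in this log
        System.out.println("keep: " + resolve(fromWal2, fromWal1).path());
    }
}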
2024-12-09T11:20:32,306 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-2 so closing down 2024-12-09T11:20:32,307 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-09T11:20:32,309 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-12-09T11:20:32,310 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000002-wal-2.temp 2024-12-09T11:20:32,311 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-09T11:20:32,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741849_1025 (size=253) 2024-12-09T11:20:32,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741849_1025 (size=253) 2024-12-09T11:20:32,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741849_1025 (size=253) 2024-12-09T11:20:32,322 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-09T11:20:32,325 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000002 2024-12-09T11:20:32,325 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 21 ms; skipped=0; WAL=hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-2, size=253, length=253, corrupted=false, cancelled=false 2024-12-09T11:20:32,325 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-2, journal: Splitting hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-2, size=253 (253bytes) at 1733743232299Finishing writing output for hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-2 so closing down at 1733743232306 (+7 ms)Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000002-wal-2.temp at 1733743232310 (+4 ms)3 split writer threads finished at 1733743232311 (+1 ms)Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733743232323 (+12 ms)Rename recovered edits 
hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000002 at 1733743232325 (+2 ms)Processed 1 edits across 1 Regions in 21 ms; skipped=0; WAL=hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-2, size=253, length=253, corrupted=false, cancelled=false at 1733743232325 2024-12-09T11:20:32,342 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-1, size=320 (320bytes) 2024-12-09T11:20:32,342 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-1 2024-12-09T11:20:32,343 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-1 after 1ms 2024-12-09T11:20:32,348 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-1: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:32,348 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-1 took 6ms 2024-12-09T11:20:32,351 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-1 so closing down 2024-12-09T11:20:32,351 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-09T11:20:32,353 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-12-09T11:20:32,355 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000001-wal-1.temp 2024-12-09T11:20:32,355 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-09T11:20:32,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741850_1026 (size=320) 2024-12-09T11:20:32,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741850_1026 (size=320) 2024-12-09T11:20:32,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741850_1026 (size=320) 2024-12-09T11:20:32,365 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-09T11:20:32,371 DEBUG [split-log-closeStream-pool-0 {}] wal.AbstractProtobufWALReader(321): Initializing compression context for 
hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:32,376 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(175): Found existing old edits file. It could be the result of a previous failed split attempt or we have duplicated wal entries. Deleting hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000002, length=253 2024-12-09T11:20:32,379 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000002 2024-12-09T11:20:32,379 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 30 ms; skipped=0; WAL=hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-1, size=320, length=320, corrupted=false, cancelled=false 2024-12-09T11:20:32,380 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-1, journal: Splitting hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-1, size=320 (320bytes) at 1733743232342Finishing writing output for hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-1 so closing down at 1733743232351 (+9 ms)Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000001-wal-1.temp at 1733743232355 (+4 ms)3 split writer threads finished at 1733743232355Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1733743232365 (+10 ms)Rename recovered edits hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000002 at 1733743232379 (+14 ms)Processed 2 edits across 1 Regions in 30 ms; skipped=0; WAL=hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal-1, size=320, length=320, corrupted=false, cancelled=false at 1733743232379 2024-12-09T11:20:32,380 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-09T11:20:32,383 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:20:32,404 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal.1733743232383, exclude list is [], retry=0 2024-12-09T11:20:32,409 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:32,409 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:32,410 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:32,413 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal.1733743232383 2024-12-09T11:20:32,414 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:39935:39935)] 2024-12-09T11:20:32,414 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => ff20ec44b323bc84e4c317acb7d1b004, NAME => 'testReplayEditsWrittenIntoWAL,,1733743232184.ff20ec44b323bc84e4c317acb7d1b004.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:20:32,414 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733743232184.ff20ec44b323bc84e4c317acb7d1b004.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:32,415 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for ff20ec44b323bc84e4c317acb7d1b004 2024-12-09T11:20:32,415 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for ff20ec44b323bc84e4c317acb7d1b004 2024-12-09T11:20:32,425 INFO [StoreOpener-ff20ec44b323bc84e4c317acb7d1b004-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region ff20ec44b323bc84e4c317acb7d1b004 2024-12-09T11:20:32,427 INFO [StoreOpener-ff20ec44b323bc84e4c317acb7d1b004-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ff20ec44b323bc84e4c317acb7d1b004 columnFamilyName a 2024-12-09T11:20:32,427 DEBUG [StoreOpener-ff20ec44b323bc84e4c317acb7d1b004-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:32,428 INFO [StoreOpener-ff20ec44b323bc84e4c317acb7d1b004-1 {}] regionserver.HStore(327): Store=ff20ec44b323bc84e4c317acb7d1b004/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:32,430 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for ff20ec44b323bc84e4c317acb7d1b004 2024-12-09T11:20:32,431 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004 2024-12-09T11:20:32,436 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004 2024-12-09T11:20:32,438 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000002 2024-12-09T11:20:32,443 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:32,445 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000002 2024-12-09T11:20:32,445 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing ff20ec44b323bc84e4c317acb7d1b004 1/1 column families, dataSize=108 B heapSize=512 B 2024-12-09T11:20:32,488 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/.tmp/a/52171b9e28e8455582408008893182dc is 58, key is testReplayEditsWrittenIntoWAL/a:1/1733743232252/Put/seqid=0 2024-12-09T11:20:32,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741852_1028 (size=5170) 2024-12-09T11:20:32,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741852_1028 (size=5170) 2024-12-09T11:20:32,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741852_1028 (size=5170) 2024-12-09T11:20:32,516 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/.tmp/a/52171b9e28e8455582408008893182dc 2024-12-09T11:20:32,527 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/.tmp/a/52171b9e28e8455582408008893182dc as 
hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/a/52171b9e28e8455582408008893182dc 2024-12-09T11:20:32,537 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/a/52171b9e28e8455582408008893182dc, entries=2, sequenceid=2, filesize=5.0 K 2024-12-09T11:20:32,537 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for ff20ec44b323bc84e4c317acb7d1b004 in 92ms, sequenceid=2, compaction requested=false; wal=null 2024-12-09T11:20:32,538 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/0000000000000000002 2024-12-09T11:20:32,539 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for ff20ec44b323bc84e4c317acb7d1b004 2024-12-09T11:20:32,539 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for ff20ec44b323bc84e4c317acb7d1b004 2024-12-09T11:20:32,542 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for ff20ec44b323bc84e4c317acb7d1b004 2024-12-09T11:20:32,546 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/ff20ec44b323bc84e4c317acb7d1b004/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1 2024-12-09T11:20:32,548 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened ff20ec44b323bc84e4c317acb7d1b004; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70068054, jitterRate=0.044095367193222046}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:20:32,548 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for ff20ec44b323bc84e4c317acb7d1b004: Writing region info on filesystem at 1733743232415Initializing all the Stores at 1733743232417 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743232418 (+1 ms)Obtaining lock to block concurrent updates at 1733743232446 (+28 ms)Preparing flush snapshotting stores in ff20ec44b323bc84e4c317acb7d1b004 at 1733743232446Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1733743232184.ff20ec44b323bc84e4c317acb7d1b004., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1733743232446Flushing stores of testReplayEditsWrittenIntoWAL,,1733743232184.ff20ec44b323bc84e4c317acb7d1b004. 
at 1733743232446Flushing ff20ec44b323bc84e4c317acb7d1b004/a: creating writer at 1733743232446Flushing ff20ec44b323bc84e4c317acb7d1b004/a: appending metadata at 1733743232482 (+36 ms)Flushing ff20ec44b323bc84e4c317acb7d1b004/a: closing flushed file at 1733743232483 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5adc11ff: reopening flushed file at 1733743232525 (+42 ms)Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for ff20ec44b323bc84e4c317acb7d1b004 in 92ms, sequenceid=2, compaction requested=false; wal=null at 1733743232537 (+12 ms)Cleaning up temporary data from old regions at 1733743232539 (+2 ms)Region opened successfully at 1733743232548 (+9 ms) 2024-12-09T11:20:32,569 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit1 Thread=379 (was 369) Potentially hanging thread: PacketResponder: BP-32692473-172.17.0.3-1733743223895:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-32692473-172.17.0.3-1733743223895:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:60462 [Waiting for operation #14] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:60654 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-32692473-172.17.0.3-1733743223895:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:58640 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:58614 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:36012 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:35860 [Waiting for operation #18] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=773 (was 691) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=340 (was 340), ProcessCount=11 (was 11), AvailableMemoryMB=1322 (was 1341) 2024-12-09T11:20:32,580 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenIntoWAL Thread=379, OpenFileDescriptor=773, MaxFileDescriptor=1048576, SystemLoadAverage=340, ProcessCount=11, AvailableMemoryMB=1321 2024-12-09T11:20:32,596 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:32,599 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:32,600 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T11:20:32,603 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-86043881, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/hregion-86043881, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:20:32,625 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-86043881/hregion-86043881.1733743232604, exclude list is [], retry=0 2024-12-09T11:20:32,628 DEBUG [AsyncFSWAL-12-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:32,629 DEBUG [AsyncFSWAL-12-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:32,630 DEBUG [AsyncFSWAL-12-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:32,632 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-86043881/hregion-86043881.1733743232604 2024-12-09T11:20:32,632 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39935:39935),(127.0.0.1/127.0.0.1:39985:39985)] 2024-12-09T11:20:32,632 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 3f14fc1ab19b4b6fcb78aa6042ad64f3, NAME => 'testReplayEditsWrittenIntoWAL,,1733743232597.3f14fc1ab19b4b6fcb78aa6042ad64f3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40493/hbase 2024-12-09T11:20:32,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741854_1030 (size=64) 2024-12-09T11:20:32,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741854_1030 (size=64) 2024-12-09T11:20:32,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741854_1030 (size=64) 2024-12-09T11:20:32,648 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733743232597.3f14fc1ab19b4b6fcb78aa6042ad64f3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:32,651 INFO [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 3f14fc1ab19b4b6fcb78aa6042ad64f3 2024-12-09T11:20:32,653 INFO [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f14fc1ab19b4b6fcb78aa6042ad64f3 columnFamilyName a 2024-12-09T11:20:32,653 DEBUG [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:32,653 INFO [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] regionserver.HStore(327): Store=3f14fc1ab19b4b6fcb78aa6042ad64f3/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:32,654 INFO [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 3f14fc1ab19b4b6fcb78aa6042ad64f3 2024-12-09T11:20:32,655 INFO [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 
604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f14fc1ab19b4b6fcb78aa6042ad64f3 columnFamilyName b 2024-12-09T11:20:32,655 DEBUG [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:32,656 INFO [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] regionserver.HStore(327): Store=3f14fc1ab19b4b6fcb78aa6042ad64f3/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:32,656 INFO [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 3f14fc1ab19b4b6fcb78aa6042ad64f3 2024-12-09T11:20:32,657 INFO [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f14fc1ab19b4b6fcb78aa6042ad64f3 columnFamilyName c 2024-12-09T11:20:32,658 DEBUG [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:32,658 INFO [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] regionserver.HStore(327): Store=3f14fc1ab19b4b6fcb78aa6042ad64f3/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:32,658 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 3f14fc1ab19b4b6fcb78aa6042ad64f3 2024-12-09T11:20:32,659 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3 2024-12-09T11:20:32,660 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3 2024-12-09T11:20:32,662 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 3f14fc1ab19b4b6fcb78aa6042ad64f3 2024-12-09T11:20:32,662 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 3f14fc1ab19b4b6fcb78aa6042ad64f3 
2024-12-09T11:20:32,663 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-09T11:20:32,664 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 3f14fc1ab19b4b6fcb78aa6042ad64f3 2024-12-09T11:20:32,668 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:20:32,668 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 3f14fc1ab19b4b6fcb78aa6042ad64f3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73248075, jitterRate=0.09148137271404266}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-09T11:20:32,669 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 3f14fc1ab19b4b6fcb78aa6042ad64f3: Writing region info on filesystem at 1733743232649Initializing all the Stores at 1733743232650 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743232651 (+1 ms)Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743232651Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743232651Cleaning up temporary data from old regions at 1733743232662 (+11 ms)Region opened successfully at 1733743232669 (+7 ms) 2024-12-09T11:20:32,669 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 3f14fc1ab19b4b6fcb78aa6042ad64f3, disabling compactions & flushes 2024-12-09T11:20:32,669 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733743232597.3f14fc1ab19b4b6fcb78aa6042ad64f3. 2024-12-09T11:20:32,669 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733743232597.3f14fc1ab19b4b6fcb78aa6042ad64f3. 2024-12-09T11:20:32,669 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733743232597.3f14fc1ab19b4b6fcb78aa6042ad64f3. after waiting 0 ms 2024-12-09T11:20:32,669 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733743232597.3f14fc1ab19b4b6fcb78aa6042ad64f3. 
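
The FlushLargeStoresPolicy figures above are just the default 128 MB memstore flush size divided by the region's three column families: 134217728 / 3 = 44739242 bytes, which is the flushSizeLowerBound=44739242 reported at open time and the "42.7 M" in the debug line. A quick arithmetic check (illustrative only, not HBase code):

public class FlushLowerBoundCheck {
  public static void main(String[] args) {
    long memstoreFlushSize = 128L * 1024 * 1024;  // default hbase.hregion.memstore.flush.size
    int families = 3;                             // column families a, b and c
    long lowerBound = memstoreFlushSize / families;
    System.out.println(lowerBound);                              // 44739242
    System.out.printf("%.1f M%n", lowerBound / 1024.0 / 1024.0); // 42.7 M
  }
}
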
2024-12-09T11:20:32,670 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733743232597.3f14fc1ab19b4b6fcb78aa6042ad64f3. 2024-12-09T11:20:32,670 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 3f14fc1ab19b4b6fcb78aa6042ad64f3: Waiting for close lock at 1733743232669Disabling compacts and flushes for region at 1733743232669Disabling writes for close at 1733743232669Writing region close event to WAL at 1733743232670 (+1 ms)Closed at 1733743232670 2024-12-09T11:20:32,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741853_1029 (size=95) 2024-12-09T11:20:32,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741853_1029 (size=95) 2024-12-09T11:20:32,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741853_1029 (size=95) 2024-12-09T11:20:32,677 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-09T11:20:32,677 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-86043881:(num 1733743232604) 2024-12-09T11:20:32,677 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-09T11:20:32,680 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:20:32,694 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743232680, exclude list is [], retry=0 2024-12-09T11:20:32,697 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:32,698 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:32,698 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:32,700 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743232680 2024-12-09T11:20:32,701 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39935:39935),(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:43581:43581)] 2024-12-09T11:20:32,897 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743232680, size=0 (0bytes) 2024-12-09T11:20:32,897 WARN [Time-limited test {}] wal.WALSplitter(453): File 
hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743232680 might be still open, length is 0
2024-12-09T11:20:32,897 INFO  [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743232680
2024-12-09T11:20:32,898 WARN  [IPC Server handler 1 on default port 40493 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743232680 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741855_1031
2024-12-09T11:20:32,899 INFO  [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743232680 after 2ms
2024-12-09T11:20:35,200 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741831_1007 (size=1321)
2024-12-09T11:20:35,201 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741832_1008 (size=32)
2024-12-09T11:20:35,424 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:58682 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:34459:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58682 dst: /127.0.0.1:34459
java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:34459 remote=/127.0.0.1:58682]. Total timeout mills is 60000, 57436 millis timeout left.
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T11:20:35,425 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:36052 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:44093:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36052 dst: /127.0.0.1:44093
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T11:20:35,425 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:60694 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:46359:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60694 dst: /127.0.0.1:46359
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T11:20:35,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741855_1032 (size=263633) 2024-12-09T11:20:35,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741855_1032 (size=263633) 2024-12-09T11:20:35,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741855_1032 (size=263633) 2024-12-09T11:20:36,307 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-09T11:20:36,354 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T11:20:36,900 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743232680 after 4003ms 2024-12-09T11:20:36,923 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743232680: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:36,926 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743232680 took 4028ms 2024-12-09T11:20:36,932 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1733743232680.temp 2024-12-09T11:20:36,966 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/recovered.edits/0000000000000000001-wal.1733743232680.temp 2024-12-09T11:20:37,127 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743232680; continuing. 
2024-12-09T11:20:37,127 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743232680 so closing down 2024-12-09T11:20:37,128 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-09T11:20:37,129 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-09T11:20:37,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741856_1033 (size=263641) 2024-12-09T11:20:37,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741856_1033 (size=263641) 2024-12-09T11:20:37,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741856_1033 (size=263641) 2024-12-09T11:20:37,153 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/recovered.edits/0000000000000000001-wal.1733743232680.temp (wrote 3002 edits, skipped 0 edits in 74 ms) 2024-12-09T11:20:37,157 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/recovered.edits/0000000000000000001-wal.1733743232680.temp to hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/recovered.edits/0000000000000003002 2024-12-09T11:20:37,158 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3002 edits across 1 Regions in 232 ms; skipped=0; WAL=hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743232680, size=0, length=0, corrupted=false, cancelled=false 2024-12-09T11:20:37,158 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743232680, journal: Splitting hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743232680, size=0 (0bytes) at 1733743232897Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/recovered.edits/0000000000000000001-wal.1733743232680.temp at 1733743236967 (+4070 ms)Split 1024 edits, skipped 0 edits. at 1733743237046 (+79 ms)Split 2048 edits, skipped 0 edits. 
at 1733743237096 (+50 ms)Finishing writing output for hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743232680 so closing down at 1733743237128 (+32 ms)3 split writer threads finished at 1733743237129 (+1 ms)Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/recovered.edits/0000000000000000001-wal.1733743232680.temp (wrote 3002 edits, skipped 0 edits in 74 ms) at 1733743237153 (+24 ms)Rename recovered edits hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/recovered.edits/0000000000000000001-wal.1733743232680.temp to hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/recovered.edits/0000000000000003002 at 1733743237157 (+4 ms)Processed 3002 edits across 1 Regions in 232 ms; skipped=0; WAL=hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743232680, size=0, length=0, corrupted=false, cancelled=false at 1733743237158 (+1 ms) 2024-12-09T11:20:37,186 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743232680 to hdfs://localhost:40493/hbase/oldWALs/wal.1733743232680 2024-12-09T11:20:37,189 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/recovered.edits/0000000000000003002 2024-12-09T11:20:37,189 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-09T11:20:37,194 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:20:37,215 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743237195, exclude list is [], retry=0 2024-12-09T11:20:37,222 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:37,222 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:37,223 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:37,242 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743237195 2024-12-09T11:20:37,251 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:39935:39935),(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39985:39985)] 2024-12-09T11:20:37,251 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733743232597.3f14fc1ab19b4b6fcb78aa6042ad64f3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:37,254 INFO [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 3f14fc1ab19b4b6fcb78aa6042ad64f3 2024-12-09T11:20:37,256 INFO [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f14fc1ab19b4b6fcb78aa6042ad64f3 columnFamilyName a 2024-12-09T11:20:37,256 DEBUG [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:37,257 INFO [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] regionserver.HStore(327): Store=3f14fc1ab19b4b6fcb78aa6042ad64f3/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:37,257 INFO [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 3f14fc1ab19b4b6fcb78aa6042ad64f3 2024-12-09T11:20:37,258 INFO [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f14fc1ab19b4b6fcb78aa6042ad64f3 columnFamilyName b 2024-12-09T11:20:37,258 DEBUG [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:37,259 INFO 
[StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] regionserver.HStore(327): Store=3f14fc1ab19b4b6fcb78aa6042ad64f3/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:37,260 INFO [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 3f14fc1ab19b4b6fcb78aa6042ad64f3 2024-12-09T11:20:37,261 INFO [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f14fc1ab19b4b6fcb78aa6042ad64f3 columnFamilyName c 2024-12-09T11:20:37,261 DEBUG [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:37,265 INFO [StoreOpener-3f14fc1ab19b4b6fcb78aa6042ad64f3-1 {}] regionserver.HStore(327): Store=3f14fc1ab19b4b6fcb78aa6042ad64f3/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:37,265 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 3f14fc1ab19b4b6fcb78aa6042ad64f3 2024-12-09T11:20:37,267 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3 2024-12-09T11:20:37,270 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3 2024-12-09T11:20:37,275 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/recovered.edits/0000000000000003002 2024-12-09T11:20:37,279 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/recovered.edits/0000000000000003002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:37,369 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-09T11:20:37,861 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 3f14fc1ab19b4b6fcb78aa6042ad64f3 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-09T11:20:37,916 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/.tmp/a/433f8e1a2ae14356af50890b487f5734 is 62, key is testReplayEditsWrittenIntoWAL/a:100/1733743232707/Put/seqid=0 2024-12-09T11:20:37,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741858_1035 (size=50463) 2024-12-09T11:20:37,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741858_1035 (size=50463) 2024-12-09T11:20:37,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741858_1035 (size=50463) 2024-12-09T11:20:37,946 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=754 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/.tmp/a/433f8e1a2ae14356af50890b487f5734 2024-12-09T11:20:37,957 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/.tmp/a/433f8e1a2ae14356af50890b487f5734 as hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/a/433f8e1a2ae14356af50890b487f5734 2024-12-09T11:20:37,970 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/a/433f8e1a2ae14356af50890b487f5734, entries=754, sequenceid=754, filesize=49.3 K 2024-12-09T11:20:37,970 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.59 KB/101984, currentSize=0 B/0 for 3f14fc1ab19b4b6fcb78aa6042ad64f3 in 110ms, sequenceid=754, compaction requested=false; wal=null 2024-12-09T11:20:37,994 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-09T11:20:37,994 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 3f14fc1ab19b4b6fcb78aa6042ad64f3 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-09T11:20:38,010 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/.tmp/a/3f39c54125a04b8cb99d5e640831f930 is 62, key is testReplayEditsWrittenIntoWAL/a:754/1733743232737/Put/seqid=0 2024-12-09T11:20:38,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741859_1036 (size=20072) 2024-12-09T11:20:38,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741859_1036 (size=20072) 2024-12-09T11:20:38,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741859_1036 (size=20072) 2024-12-09T11:20:38,023 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.93 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/.tmp/a/3f39c54125a04b8cb99d5e640831f930 2024-12-09T11:20:38,057 DEBUG [Time-limited test {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/.tmp/b/3e15564ff8e845978702016a25078e8f is 62, key is testReplayEditsWrittenIntoWAL/b:100/1733743232774/Put/seqid=0 2024-12-09T11:20:38,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741860_1037 (size=35835) 2024-12-09T11:20:38,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741860_1037 (size=35835) 2024-12-09T11:20:38,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741860_1037 (size=35835) 2024-12-09T11:20:38,067 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=28.56 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/.tmp/b/3e15564ff8e845978702016a25078e8f 2024-12-09T11:20:38,075 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/.tmp/a/3f39c54125a04b8cb99d5e640831f930 as hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/a/3f39c54125a04b8cb99d5e640831f930 2024-12-09T11:20:38,082 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/a/3f39c54125a04b8cb99d5e640831f930, entries=246, sequenceid=1508, filesize=19.6 K 2024-12-09T11:20:38,084 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/.tmp/b/3e15564ff8e845978702016a25078e8f as hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/b/3e15564ff8e845978702016a25078e8f 2024-12-09T11:20:38,090 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/b/3e15564ff8e845978702016a25078e8f, entries=508, sequenceid=1508, filesize=35.0 K 2024-12-09T11:20:38,090 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for 3f14fc1ab19b4b6fcb78aa6042ad64f3 in 96ms, sequenceid=1508, compaction requested=false; wal=null 2024-12-09T11:20:38,106 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-09T11:20:38,106 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 3f14fc1ab19b4b6fcb78aa6042ad64f3 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-09T11:20:38,113 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/.tmp/b/b2ec11adbb554b4a8703bb77b42323f0 is 62, key is testReplayEditsWrittenIntoWAL/b:508/1733743232791/Put/seqid=0 2024-12-09T11:20:38,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741861_1038 (size=35082) 
2024-12-09T11:20:38,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741861_1038 (size=35082) 2024-12-09T11:20:38,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741861_1038 (size=35082) 2024-12-09T11:20:38,127 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=27.87 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/.tmp/b/b2ec11adbb554b4a8703bb77b42323f0 2024-12-09T11:20:38,147 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T11:20:38,148 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T11:20:38,150 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:20:38,150 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T11:20:38,150 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T11:20:38,150 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-09T11:20:38,151 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL 2024-12-09T11:20:38,151 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL Metrics about Tables on a single HBase RegionServer 2024-12-09T11:20:38,151 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/.tmp/c/6c0ae8e8e100482dbb441da371535246 is 62, key is testReplayEditsWrittenIntoWAL/c:100/1733743232820/Put/seqid=0 2024-12-09T11:20:38,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741862_1039 (size=20825) 2024-12-09T11:20:38,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741862_1039 (size=20825) 2024-12-09T11:20:38,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741862_1039 (size=20825) 2024-12-09T11:20:38,161 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.63 KB at sequenceid=2262 (bloomFilter=true), 
to=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/.tmp/c/6c0ae8e8e100482dbb441da371535246 2024-12-09T11:20:38,171 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/.tmp/b/b2ec11adbb554b4a8703bb77b42323f0 as hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/b/b2ec11adbb554b4a8703bb77b42323f0 2024-12-09T11:20:38,178 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/b/b2ec11adbb554b4a8703bb77b42323f0, entries=492, sequenceid=2262, filesize=34.3 K 2024-12-09T11:20:38,179 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/.tmp/c/6c0ae8e8e100482dbb441da371535246 as hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/c/6c0ae8e8e100482dbb441da371535246 2024-12-09T11:20:38,186 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/c/6c0ae8e8e100482dbb441da371535246, entries=262, sequenceid=2262, filesize=20.3 K 2024-12-09T11:20:38,186 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for 3f14fc1ab19b4b6fcb78aa6042ad64f3 in 80ms, sequenceid=2262, compaction requested=false; wal=null 2024-12-09T11:20:38,201 WARN [Time-limited test {}] regionserver.HRegion(5722): No family for cell testReplayEditsWrittenIntoWAL/another family:testReplayEditsWrittenIntoWAL/1733743232858/Put/vlen=29/seqid=0 in region testReplayEditsWrittenIntoWAL,,1733743232597.3f14fc1ab19b4b6fcb78aa6042ad64f3. 
2024-12-09T11:20:38,205 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3001, skipped 1, firstSequenceIdInLog=1, maxSequenceIdInLog=3002, path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/recovered.edits/0000000000000003002 2024-12-09T11:20:38,206 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-09T11:20:38,206 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 3f14fc1ab19b4b6fcb78aa6042ad64f3 3/3 column families, dataSize=41.85 KB heapSize=98.89 KB 2024-12-09T11:20:38,216 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/.tmp/c/982ed13801da412ca764fdda43d86b15 is 62, key is testReplayEditsWrittenIntoWAL/c:262/1733743232828/Put/seqid=0 2024-12-09T11:20:38,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741863_1040 (size=50301) 2024-12-09T11:20:38,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741863_1040 (size=50301) 2024-12-09T11:20:38,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741863_1040 (size=50301) 2024-12-09T11:20:38,226 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=41.85 KB at sequenceid=3002 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/.tmp/c/982ed13801da412ca764fdda43d86b15 2024-12-09T11:20:38,234 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 982ed13801da412ca764fdda43d86b15 2024-12-09T11:20:38,236 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/.tmp/c/982ed13801da412ca764fdda43d86b15 as hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/c/982ed13801da412ca764fdda43d86b15 2024-12-09T11:20:38,244 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 982ed13801da412ca764fdda43d86b15 2024-12-09T11:20:38,244 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/c/982ed13801da412ca764fdda43d86b15, entries=739, sequenceid=3002, filesize=49.1 K 2024-12-09T11:20:38,244 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~41.85 KB/42854, heapSize ~98.38 KB/100736, currentSize=0 B/0 for 3f14fc1ab19b4b6fcb78aa6042ad64f3 in 38ms, sequenceid=3002, compaction requested=false; wal=null 2024-12-09T11:20:38,245 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/recovered.edits/0000000000000003002 2024-12-09T11:20:38,246 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 3f14fc1ab19b4b6fcb78aa6042ad64f3 2024-12-09T11:20:38,247 DEBUG [Time-limited test {}] regionserver.HRegion(1060): 
Cleaning up temporary data for 3f14fc1ab19b4b6fcb78aa6042ad64f3 2024-12-09T11:20:38,247 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T11:20:38,249 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 3f14fc1ab19b4b6fcb78aa6042ad64f3 2024-12-09T11:20:38,251 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenIntoWAL/3f14fc1ab19b4b6fcb78aa6042ad64f3/recovered.edits/3002.seqid, newMaxSeqId=3002, maxSeqId=1 2024-12-09T11:20:38,252 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 3f14fc1ab19b4b6fcb78aa6042ad64f3; next sequenceid=3003; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=204800, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62057677, jitterRate=-0.07526855170726776}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T11:20:38,253 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 3f14fc1ab19b4b6fcb78aa6042ad64f3: Writing region info on filesystem at 1733743237251Initializing all the Stores at 1733743237253 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743237254 (+1 ms)Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743237254Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743237254Cleaning up temporary data from old regions at 1733743238247 (+993 ms)Region opened successfully at 1733743238253 (+6 ms) 2024-12-09T11:20:38,323 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 3f14fc1ab19b4b6fcb78aa6042ad64f3, disabling compactions & flushes 2024-12-09T11:20:38,323 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733743232597.3f14fc1ab19b4b6fcb78aa6042ad64f3. 2024-12-09T11:20:38,323 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733743232597.3f14fc1ab19b4b6fcb78aa6042ad64f3. 2024-12-09T11:20:38,323 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733743232597.3f14fc1ab19b4b6fcb78aa6042ad64f3. after waiting 0 ms 2024-12-09T11:20:38,323 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733743232597.3f14fc1ab19b4b6fcb78aa6042ad64f3. 
2024-12-09T11:20:38,338 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733743232597.3f14fc1ab19b4b6fcb78aa6042ad64f3. 2024-12-09T11:20:38,338 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 3f14fc1ab19b4b6fcb78aa6042ad64f3: Waiting for close lock at 1733743238323Disabling compacts and flushes for region at 1733743238323Disabling writes for close at 1733743238323Writing region close event to WAL at 1733743238338 (+15 ms)Closed at 1733743238338 2024-12-09T11:20:38,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741857_1034 (size=95) 2024-12-09T11:20:38,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741857_1034 (size=95) 2024-12-09T11:20:38,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741857_1034 (size=95) 2024-12-09T11:20:38,351 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-09T11:20:38,352 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733743237195) 2024-12-09T11:20:38,369 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenIntoWAL Thread=395 (was 379) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: LeaseRenewer:jenkins.replay.wal.secondtime@localhost:40493 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1313953972_22 at /127.0.0.1:60748 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to 
localhost/127.0.0.1:34527 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1313953972_22 at /127.0.0.1:58778 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34527 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1313953972_22 at /127.0.0.1:58740 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45319 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-1 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1313953972_22 at /127.0.0.1:36088 [Waiting for operation #22] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:40493 from jenkins.replay.wal.secondtime 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@69039562[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:45319 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@1252a6e[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=853 (was 773) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=337 (was 340), ProcessCount=11 (was 11), AvailableMemoryMB=1275 (was 1321) 2024-12-09T11:20:38,389 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#test2727 Thread=395, OpenFileDescriptor=853, MaxFileDescriptor=1048576, SystemLoadAverage=337, ProcessCount=11, AvailableMemoryMB=1273 2024-12-09T11:20:38,413 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:38,416 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:38,417 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T11:20:38,421 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-75480012, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/hregion-75480012, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:20:38,442 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-75480012/hregion-75480012.1733743238421, exclude list is [], retry=0 2024-12-09T11:20:38,446 DEBUG [AsyncFSWAL-14-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:38,447 DEBUG [AsyncFSWAL-14-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:38,449 DEBUG [AsyncFSWAL-14-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:38,475 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-75480012/hregion-75480012.1733743238421 2024-12-09T11:20:38,477 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:39935:39935)] 2024-12-09T11:20:38,477 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 2c19a2973bce9d6bae27713ac9f6f226, NAME => 'test2727,,1733743238414.2c19a2973bce9d6bae27713ac9f6f226.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test2727', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40493/hbase 2024-12-09T11:20:38,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741865_1042 (size=43) 2024-12-09T11:20:38,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741865_1042 (size=43) 2024-12-09T11:20:38,537 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1733743238414.2c19a2973bce9d6bae27713ac9f6f226.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:38,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741865_1042 (size=43) 2024-12-09T11:20:38,543 INFO [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 2c19a2973bce9d6bae27713ac9f6f226 2024-12-09T11:20:38,546 INFO [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c19a2973bce9d6bae27713ac9f6f226 columnFamilyName a 2024-12-09T11:20:38,546 DEBUG [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:38,548 INFO [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] regionserver.HStore(327): Store=2c19a2973bce9d6bae27713ac9f6f226/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:38,548 INFO [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 2c19a2973bce9d6bae27713ac9f6f226 2024-12-09T11:20:38,552 INFO [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c19a2973bce9d6bae27713ac9f6f226 columnFamilyName b 2024-12-09T11:20:38,552 DEBUG [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:38,553 INFO [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] regionserver.HStore(327): Store=2c19a2973bce9d6bae27713ac9f6f226/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:38,553 INFO [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 2c19a2973bce9d6bae27713ac9f6f226 2024-12-09T11:20:38,555 INFO [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c19a2973bce9d6bae27713ac9f6f226 columnFamilyName c 2024-12-09T11:20:38,555 DEBUG [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:38,556 INFO [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] regionserver.HStore(327): Store=2c19a2973bce9d6bae27713ac9f6f226/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:38,556 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 2c19a2973bce9d6bae27713ac9f6f226 2024-12-09T11:20:38,562 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226 2024-12-09T11:20:38,562 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226 2024-12-09T11:20:38,564 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 2c19a2973bce9d6bae27713ac9f6f226 2024-12-09T11:20:38,564 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 2c19a2973bce9d6bae27713ac9f6f226 2024-12-09T11:20:38,565 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in 
table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-09T11:20:38,567 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 2c19a2973bce9d6bae27713ac9f6f226 2024-12-09T11:20:38,571 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:20:38,572 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 2c19a2973bce9d6bae27713ac9f6f226; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75384848, jitterRate=0.1233217716217041}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-09T11:20:38,573 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 2c19a2973bce9d6bae27713ac9f6f226: Writing region info on filesystem at 1733743238537Initializing all the Stores at 1733743238539 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743238539Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743238542 (+3 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743238542Cleaning up temporary data from old regions at 1733743238564 (+22 ms)Region opened successfully at 1733743238573 (+9 ms) 2024-12-09T11:20:38,574 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 2c19a2973bce9d6bae27713ac9f6f226, disabling compactions & flushes 2024-12-09T11:20:38,574 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1733743238414.2c19a2973bce9d6bae27713ac9f6f226. 2024-12-09T11:20:38,574 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1733743238414.2c19a2973bce9d6bae27713ac9f6f226. 2024-12-09T11:20:38,574 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1733743238414.2c19a2973bce9d6bae27713ac9f6f226. after waiting 0 ms 2024-12-09T11:20:38,574 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1733743238414.2c19a2973bce9d6bae27713ac9f6f226. 2024-12-09T11:20:38,579 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1733743238414.2c19a2973bce9d6bae27713ac9f6f226. 
2024-12-09T11:20:38,579 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 2c19a2973bce9d6bae27713ac9f6f226: Waiting for close lock at 1733743238574Disabling compacts and flushes for region at 1733743238574Disabling writes for close at 1733743238574Writing region close event to WAL at 1733743238578 (+4 ms)Closed at 1733743238578 2024-12-09T11:20:38,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741864_1041 (size=95) 2024-12-09T11:20:38,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741864_1041 (size=95) 2024-12-09T11:20:38,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741864_1041 (size=95) 2024-12-09T11:20:38,595 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-09T11:20:38,595 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-75480012:(num 1733743238421) 2024-12-09T11:20:38,595 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-09T11:20:38,598 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:20:38,617 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743238598, exclude list is [], retry=0 2024-12-09T11:20:38,627 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:38,627 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:38,628 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:38,651 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743238598 2024-12-09T11:20:38,654 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39935:39935),(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:43581:43581)] 2024-12-09T11:20:38,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741866_1043 (size=263359) 2024-12-09T11:20:38,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741866_1043 (size=263359) 2024-12-09T11:20:38,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741866_1043 
(size=263359) 2024-12-09T11:20:38,920 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743238598, size=257.2 K (263359bytes) 2024-12-09T11:20:38,921 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743238598 2024-12-09T11:20:38,921 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743238598 after 0ms 2024-12-09T11:20:38,925 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743238598: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:38,927 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743238598 took 7ms 2024-12-09T11:20:38,934 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1733743238598.temp 2024-12-09T11:20:38,936 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000000001-wal.1733743238598.temp 2024-12-09T11:20:39,003 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743238598 so closing down 2024-12-09T11:20:39,003 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-09T11:20:39,003 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-09T11:20:39,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741867_1044 (size=263359) 2024-12-09T11:20:39,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741867_1044 (size=263359) 2024-12-09T11:20:39,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741867_1044 (size=263359) 2024-12-09T11:20:39,018 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000000001-wal.1733743238598.temp (wrote 3000 edits, skipped 0 edits in 52 ms) 2024-12-09T11:20:39,023 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000000001-wal.1733743238598.temp to hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000003000 2024-12-09T11:20:39,023 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 94 ms; skipped=0; WAL=hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743238598, size=257.2 K, length=263359, corrupted=false, cancelled=false 
2024-12-09T11:20:39,023 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743238598, journal: Splitting hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743238598, size=257.2 K (263359bytes) at 1733743238921Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000000001-wal.1733743238598.temp at 1733743238936 (+15 ms)Split 1024 edits, skipped 0 edits. at 1733743238959 (+23 ms)Split 2048 edits, skipped 0 edits. at 1733743238984 (+25 ms)Finishing writing output for hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743238598 so closing down at 1733743239003 (+19 ms)3 split writer threads finished at 1733743239004 (+1 ms)Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000000001-wal.1733743238598.temp (wrote 3000 edits, skipped 0 edits in 52 ms) at 1733743239018 (+14 ms)Rename recovered edits hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000000001-wal.1733743238598.temp to hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000003000 at 1733743239023 (+5 ms)Processed 3000 edits across 1 Regions in 94 ms; skipped=0; WAL=hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743238598, size=257.2 K, length=263359, corrupted=false, cancelled=false at 1733743239023 2024-12-09T11:20:39,026 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743238598 to hdfs://localhost:40493/hbase/oldWALs/wal.1733743238598 2024-12-09T11:20:39,030 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000003000 2024-12-09T11:20:39,030 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-09T11:20:39,033 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:20:39,051 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743239033, exclude list is [], retry=0 2024-12-09T11:20:39,056 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:39,056 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:39,057 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:39,067 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743239033 2024-12-09T11:20:39,070 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39935:39935)] 2024-12-09T11:20:39,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741868_1045 (size=263486) 2024-12-09T11:20:39,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741868_1045 (size=263486) 2024-12-09T11:20:39,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741868_1045 (size=263486) 2024-12-09T11:20:39,276 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743239033, size=257.3 K (263486bytes) 2024-12-09T11:20:39,276 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743239033 2024-12-09T11:20:39,277 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743239033 after 1ms 2024-12-09T11:20:39,280 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743239033: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:39,282 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743239033 took 6ms 2024-12-09T11:20:39,290 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000003001-wal.1733743239033.temp 2024-12-09T11:20:39,299 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000003001-wal.1733743239033.temp 2024-12-09T11:20:39,361 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743239033 so closing down 2024-12-09T11:20:39,362 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-09T11:20:39,370 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-09T11:20:39,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741869_1046 (size=263486) 2024-12-09T11:20:39,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741869_1046 (size=263486) 2024-12-09T11:20:39,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741869_1046 (size=263486) 2024-12-09T11:20:39,381 INFO 
[split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000003001-wal.1733743239033.temp (wrote 3000 edits, skipped 0 edits in 45 ms) 2024-12-09T11:20:39,387 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000003001-wal.1733743239033.temp to hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000006000 2024-12-09T11:20:39,387 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 101 ms; skipped=0; WAL=hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743239033, size=257.3 K, length=263486, corrupted=false, cancelled=false 2024-12-09T11:20:39,387 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743239033, journal: Splitting hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743239033, size=257.3 K (263486bytes) at 1733743239276Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000003001-wal.1733743239033.temp at 1733743239299 (+23 ms)Split 1024 edits, skipped 0 edits. at 1733743239311 (+12 ms)Split 2048 edits, skipped 0 edits. at 1733743239338 (+27 ms)Finishing writing output for hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743239033 so closing down at 1733743239361 (+23 ms)3 split writer threads finished at 1733743239370 (+9 ms)Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000003001-wal.1733743239033.temp (wrote 3000 edits, skipped 0 edits in 45 ms) at 1733743239381 (+11 ms)Rename recovered edits hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000003001-wal.1733743239033.temp to hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000006000 at 1733743239387 (+6 ms)Processed 3000 edits across 1 Regions in 101 ms; skipped=0; WAL=hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743239033, size=257.3 K, length=263486, corrupted=false, cancelled=false at 1733743239387 2024-12-09T11:20:39,403 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743239033 to hdfs://localhost:40493/hbase/oldWALs/wal.1733743239033 2024-12-09T11:20:39,405 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000006000 2024-12-09T11:20:39,405 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-09T11:20:39,408 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/test2727-manual,16010,1733743238412, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:20:39,430 DEBUG [Time-limited test {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743239409, exclude list is [], retry=0 2024-12-09T11:20:39,433 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:39,434 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:39,434 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:39,465 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733743238412/wal.1733743239409 2024-12-09T11:20:39,466 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:39935:39935)] 2024-12-09T11:20:39,467 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 2c19a2973bce9d6bae27713ac9f6f226, NAME => 'test2727,,1733743238414.2c19a2973bce9d6bae27713ac9f6f226.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:20:39,467 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1733743238414.2c19a2973bce9d6bae27713ac9f6f226.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:39,467 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 2c19a2973bce9d6bae27713ac9f6f226 2024-12-09T11:20:39,467 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 2c19a2973bce9d6bae27713ac9f6f226 2024-12-09T11:20:39,469 INFO [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 2c19a2973bce9d6bae27713ac9f6f226 2024-12-09T11:20:39,470 INFO [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c19a2973bce9d6bae27713ac9f6f226 columnFamilyName a 2024-12-09T11:20:39,470 DEBUG [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:39,471 INFO [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] regionserver.HStore(327): Store=2c19a2973bce9d6bae27713ac9f6f226/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:39,471 INFO [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 2c19a2973bce9d6bae27713ac9f6f226 2024-12-09T11:20:39,474 INFO [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c19a2973bce9d6bae27713ac9f6f226 columnFamilyName b 2024-12-09T11:20:39,475 DEBUG [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:39,475 INFO [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] regionserver.HStore(327): Store=2c19a2973bce9d6bae27713ac9f6f226/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:39,475 INFO [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 2c19a2973bce9d6bae27713ac9f6f226 2024-12-09T11:20:39,477 INFO [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c19a2973bce9d6bae27713ac9f6f226 columnFamilyName c 2024-12-09T11:20:39,477 DEBUG [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:39,477 INFO [StoreOpener-2c19a2973bce9d6bae27713ac9f6f226-1 {}] 
regionserver.HStore(327): Store=2c19a2973bce9d6bae27713ac9f6f226/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:39,478 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 2c19a2973bce9d6bae27713ac9f6f226 2024-12-09T11:20:39,479 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226 2024-12-09T11:20:39,482 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 2 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226 2024-12-09T11:20:39,483 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000003000 2024-12-09T11:20:39,486 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000003000: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:39,572 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=3000, path=hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000003000 2024-12-09T11:20:39,577 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000006000 2024-12-09T11:20:39,583 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000006000: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:39,649 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=3001, maxSequenceIdInLog=6000, path=hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000006000 2024-12-09T11:20:39,650 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 2c19a2973bce9d6bae27713ac9f6f226 3/3 column families, dataSize=215.51 KB heapSize=657 KB 2024-12-09T11:20:39,681 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/.tmp/a/aa74348a0363473db118cbc1891c2501 is 41, key is test2727/a:100/1733743239077/Put/seqid=0 2024-12-09T11:20:39,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741871_1048 (size=84227) 2024-12-09T11:20:39,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741871_1048 (size=84227) 2024-12-09T11:20:39,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741871_1048 (size=84227) 2024-12-09T11:20:39,703 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/.tmp/a/aa74348a0363473db118cbc1891c2501 2024-12-09T11:20:39,740 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/.tmp/b/c79a8cb0f3ae42cf8ca2e4cda47f5443 is 41, key is test2727/b:100/1733743239144/Put/seqid=0 2024-12-09T11:20:39,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741872_1049 (size=84609) 2024-12-09T11:20:39,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741872_1049 (size=84609) 2024-12-09T11:20:39,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741872_1049 (size=84609) 2024-12-09T11:20:39,754 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/.tmp/b/c79a8cb0f3ae42cf8ca2e4cda47f5443 2024-12-09T11:20:39,790 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/.tmp/c/f96b3cc8425f49bba34f371069daa47a is 41, key is test2727/c:100/1733743239203/Put/seqid=0 2024-12-09T11:20:39,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741873_1050 (size=84609) 2024-12-09T11:20:39,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741873_1050 (size=84609) 2024-12-09T11:20:39,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741873_1050 (size=84609) 2024-12-09T11:20:39,836 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/.tmp/c/f96b3cc8425f49bba34f371069daa47a 2024-12-09T11:20:39,851 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/.tmp/a/aa74348a0363473db118cbc1891c2501 as hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/a/aa74348a0363473db118cbc1891c2501 2024-12-09T11:20:39,865 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/a/aa74348a0363473db118cbc1891c2501, entries=2000, sequenceid=6000, filesize=82.3 K 2024-12-09T11:20:39,867 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/.tmp/b/c79a8cb0f3ae42cf8ca2e4cda47f5443 as hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/b/c79a8cb0f3ae42cf8ca2e4cda47f5443 2024-12-09T11:20:39,876 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/b/c79a8cb0f3ae42cf8ca2e4cda47f5443, entries=2000, sequenceid=6000, filesize=82.6 K 2024-12-09T11:20:39,878 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/.tmp/c/f96b3cc8425f49bba34f371069daa47a as hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/c/f96b3cc8425f49bba34f371069daa47a 2024-12-09T11:20:39,889 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/c/f96b3cc8425f49bba34f371069daa47a, entries=2000, sequenceid=6000, filesize=82.6 K 2024-12-09T11:20:39,890 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for 2c19a2973bce9d6bae27713ac9f6f226 in 241ms, sequenceid=6000, compaction requested=false; wal=null 2024-12-09T11:20:39,892 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000003000 2024-12-09T11:20:39,893 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/0000000000000006000 2024-12-09T11:20:39,895 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 2c19a2973bce9d6bae27713ac9f6f226 2024-12-09T11:20:39,895 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 2c19a2973bce9d6bae27713ac9f6f226 2024-12-09T11:20:39,896 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
2024-12-09T11:20:39,898 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 2c19a2973bce9d6bae27713ac9f6f226 2024-12-09T11:20:39,907 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/test2727/2c19a2973bce9d6bae27713ac9f6f226/recovered.edits/6000.seqid, newMaxSeqId=6000, maxSeqId=1 2024-12-09T11:20:39,908 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 2c19a2973bce9d6bae27713ac9f6f226; next sequenceid=6001; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63296350, jitterRate=-0.0568108856678009}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-09T11:20:39,910 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 2c19a2973bce9d6bae27713ac9f6f226: Writing region info on filesystem at 1733743239467Initializing all the Stores at 1733743239469 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743239469Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743239469Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743239469Obtaining lock to block concurrent updates at 1733743239650 (+181 ms)Preparing flush snapshotting stores in 2c19a2973bce9d6bae27713ac9f6f226 at 1733743239650Finished memstore snapshotting test2727,,1733743238414.2c19a2973bce9d6bae27713ac9f6f226., syncing WAL and waiting on mvcc, flushsize=dataSize=220680, getHeapSize=672720, getOffHeapSize=0, getCellsCount=6000 at 1733743239650Flushing stores of test2727,,1733743238414.2c19a2973bce9d6bae27713ac9f6f226. 
at 1733743239650Flushing 2c19a2973bce9d6bae27713ac9f6f226/a: creating writer at 1733743239650Flushing 2c19a2973bce9d6bae27713ac9f6f226/a: appending metadata at 1733743239681 (+31 ms)Flushing 2c19a2973bce9d6bae27713ac9f6f226/a: closing flushed file at 1733743239681Flushing 2c19a2973bce9d6bae27713ac9f6f226/b: creating writer at 1733743239710 (+29 ms)Flushing 2c19a2973bce9d6bae27713ac9f6f226/b: appending metadata at 1733743239738 (+28 ms)Flushing 2c19a2973bce9d6bae27713ac9f6f226/b: closing flushed file at 1733743239738Flushing 2c19a2973bce9d6bae27713ac9f6f226/c: creating writer at 1733743239763 (+25 ms)Flushing 2c19a2973bce9d6bae27713ac9f6f226/c: appending metadata at 1733743239789 (+26 ms)Flushing 2c19a2973bce9d6bae27713ac9f6f226/c: closing flushed file at 1733743239789Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@92d3b26: reopening flushed file at 1733743239849 (+60 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b777bc1: reopening flushed file at 1733743239866 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6e7449c: reopening flushed file at 1733743239877 (+11 ms)Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for 2c19a2973bce9d6bae27713ac9f6f226 in 241ms, sequenceid=6000, compaction requested=false; wal=null at 1733743239890 (+13 ms)Cleaning up temporary data from old regions at 1733743239895 (+5 ms)Region opened successfully at 1733743239910 (+15 ms) 2024-12-09T11:20:39,912 DEBUG [Time-limited test {}] wal.AbstractTestWALReplay(320): region.getOpenSeqNum(): 6001, wal3.id: 0 2024-12-09T11:20:39,912 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 2c19a2973bce9d6bae27713ac9f6f226, disabling compactions & flushes 2024-12-09T11:20:39,912 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1733743238414.2c19a2973bce9d6bae27713ac9f6f226. 2024-12-09T11:20:39,912 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1733743238414.2c19a2973bce9d6bae27713ac9f6f226. 2024-12-09T11:20:39,912 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1733743238414.2c19a2973bce9d6bae27713ac9f6f226. after waiting 0 ms 2024-12-09T11:20:39,912 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1733743238414.2c19a2973bce9d6bae27713ac9f6f226. 2024-12-09T11:20:39,915 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1733743238414.2c19a2973bce9d6bae27713ac9f6f226. 
2024-12-09T11:20:39,915 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 2c19a2973bce9d6bae27713ac9f6f226: Waiting for close lock at 1733743239912Disabling compacts and flushes for region at 1733743239912Disabling writes for close at 1733743239912Writing region close event to WAL at 1733743239914 (+2 ms)Closed at 1733743239915 (+1 ms) 2024-12-09T11:20:39,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741870_1047 (size=95) 2024-12-09T11:20:39,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741870_1047 (size=95) 2024-12-09T11:20:39,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741870_1047 (size=95) 2024-12-09T11:20:39,925 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-09T11:20:39,925 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733743239409) 2024-12-09T11:20:39,943 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#test2727 Thread=400 (was 395) Potentially hanging thread: AsyncFSWAL-14-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:58778 [Waiting for operation #15] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:40708 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:58740 [Waiting for operation #10] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:44806 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: AsyncFSWAL-14-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=917 (was 853) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=337 (was 337), ProcessCount=11 (was 11), AvailableMemoryMB=1029 (was 1273) 2024-12-09T11:20:39,956 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testSequentialEditLogSeqNum Thread=400, OpenFileDescriptor=917, MaxFileDescriptor=1048576, SystemLoadAverage=337, ProcessCount=11, AvailableMemoryMB=1028 2024-12-09T11:20:39,976 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:39,983 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733743239975, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:20:39,984 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor wal.1733743239984 2024-12-09T11:20:40,001 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733743239975/wal.1733743239984 2024-12-09T11:20:40,009 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new MockWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:39935:39935)] 2024-12-09T11:20:40,013 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => d1225173a8111dc482bbbcd025ab1da3, NAME => 'testSequentialEditLogSeqNum,,1733743239976.d1225173a8111dc482bbbcd025ab1da3.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:20:40,013 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testSequentialEditLogSeqNum,,1733743239976.d1225173a8111dc482bbbcd025ab1da3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:40,013 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for d1225173a8111dc482bbbcd025ab1da3 2024-12-09T11:20:40,013 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for d1225173a8111dc482bbbcd025ab1da3 2024-12-09T11:20:40,015 WARN [Time-limited test {}] regionserver.HRegionFileSystem(836): 
hdfs://localhost:40493/hbase/data/default/testSequentialEditLogSeqNum/d1225173a8111dc482bbbcd025ab1da3 doesn't exist for region: d1225173a8111dc482bbbcd025ab1da3 on table testSequentialEditLogSeqNum 2024-12-09T11:20:40,016 WARN [Time-limited test {}] regionserver.HRegionFileSystem(854): .regioninfo file not found for region: d1225173a8111dc482bbbcd025ab1da3 on table testSequentialEditLogSeqNum 2024-12-09T11:20:40,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741875_1052 (size=62) 2024-12-09T11:20:40,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741875_1052 (size=62) 2024-12-09T11:20:40,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741875_1052 (size=62) 2024-12-09T11:20:40,042 INFO [StoreOpener-d1225173a8111dc482bbbcd025ab1da3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d1225173a8111dc482bbbcd025ab1da3 2024-12-09T11:20:40,045 INFO [StoreOpener-d1225173a8111dc482bbbcd025ab1da3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d1225173a8111dc482bbbcd025ab1da3 columnFamilyName a 2024-12-09T11:20:40,045 DEBUG [StoreOpener-d1225173a8111dc482bbbcd025ab1da3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:40,045 INFO [StoreOpener-d1225173a8111dc482bbbcd025ab1da3-1 {}] regionserver.HStore(327): Store=d1225173a8111dc482bbbcd025ab1da3/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:40,046 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d1225173a8111dc482bbbcd025ab1da3 2024-12-09T11:20:40,047 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testSequentialEditLogSeqNum/d1225173a8111dc482bbbcd025ab1da3 2024-12-09T11:20:40,048 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testSequentialEditLogSeqNum/d1225173a8111dc482bbbcd025ab1da3 2024-12-09T11:20:40,049 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d1225173a8111dc482bbbcd025ab1da3 2024-12-09T11:20:40,049 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d1225173a8111dc482bbbcd025ab1da3 2024-12-09T11:20:40,056 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing 
seq id for d1225173a8111dc482bbbcd025ab1da3 2024-12-09T11:20:40,059 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/testSequentialEditLogSeqNum/d1225173a8111dc482bbbcd025ab1da3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:20:40,060 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d1225173a8111dc482bbbcd025ab1da3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59083637, jitterRate=-0.11958520114421844}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:20:40,061 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d1225173a8111dc482bbbcd025ab1da3: Writing region info on filesystem at 1733743240013Initializing all the Stores at 1733743240040 (+27 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743240040Cleaning up temporary data from old regions at 1733743240049 (+9 ms)Region opened successfully at 1733743240061 (+12 ms) 2024-12-09T11:20:40,087 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing d1225173a8111dc482bbbcd025ab1da3 1/1 column families, dataSize=770 B heapSize=1.73 KB 2024-12-09T11:20:40,117 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testSequentialEditLogSeqNum/d1225173a8111dc482bbbcd025ab1da3/.tmp/a/5a781fd8a1f1406a918118e2d02bfff8 is 81, key is testSequentialEditLogSeqNum/a:x0/1733743240061/Put/seqid=0 2024-12-09T11:20:40,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741876_1053 (size=5833) 2024-12-09T11:20:40,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741876_1053 (size=5833) 2024-12-09T11:20:40,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741876_1053 (size=5833) 2024-12-09T11:20:40,128 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=770 B at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testSequentialEditLogSeqNum/d1225173a8111dc482bbbcd025ab1da3/.tmp/a/5a781fd8a1f1406a918118e2d02bfff8 2024-12-09T11:20:40,138 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testSequentialEditLogSeqNum/d1225173a8111dc482bbbcd025ab1da3/.tmp/a/5a781fd8a1f1406a918118e2d02bfff8 as hdfs://localhost:40493/hbase/data/default/testSequentialEditLogSeqNum/d1225173a8111dc482bbbcd025ab1da3/a/5a781fd8a1f1406a918118e2d02bfff8 2024-12-09T11:20:40,145 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testSequentialEditLogSeqNum/d1225173a8111dc482bbbcd025ab1da3/a/5a781fd8a1f1406a918118e2d02bfff8, entries=10, sequenceid=13, filesize=5.7 K 2024-12-09T11:20:40,147 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~770 B/770, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 
d1225173a8111dc482bbbcd025ab1da3 in 60ms, sequenceid=13, compaction requested=false 2024-12-09T11:20:40,147 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for d1225173a8111dc482bbbcd025ab1da3: 2024-12-09T11:20:40,160 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:20:40,160 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:20:40,160 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:20:40,161 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:20:40,161 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:20:40,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741874_1051 (size=1844) 2024-12-09T11:20:40,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741874_1051 (size=1844) 2024-12-09T11:20:40,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741874_1051 (size=1844) 2024-12-09T11:20:40,186 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:40493/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733743239975/wal.1733743239984, size=1.8 K (1844bytes) 2024-12-09T11:20:40,187 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40493/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733743239975/wal.1733743239984 2024-12-09T11:20:40,187 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40493/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733743239975/wal.1733743239984 after 0ms 2024-12-09T11:20:40,192 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733743239975/wal.1733743239984: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:40,193 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:40493/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733743239975/wal.1733743239984 took 7ms 2024-12-09T11:20:40,201 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:40493/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733743239975/wal.1733743239984 so closing down 2024-12-09T11:20:40,201 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-09T11:20:40,202 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733743239984.temp 2024-12-09T11:20:40,204 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testSequentialEditLogSeqNum/d1225173a8111dc482bbbcd025ab1da3/recovered.edits/0000000000000000003-wal.1733743239984.temp 2024-12-09T11:20:40,204 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-09T11:20:40,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741877_1054 (size=1477) 2024-12-09T11:20:40,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34459 is added to blk_1073741877_1054 (size=1477) 2024-12-09T11:20:40,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741877_1054 (size=1477) 2024-12-09T11:20:40,271 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testSequentialEditLogSeqNum/d1225173a8111dc482bbbcd025ab1da3/recovered.edits/0000000000000000003-wal.1733743239984.temp (wrote 15 edits, skipped 0 edits in 0 ms) 2024-12-09T11:20:40,273 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:40493/hbase/data/default/testSequentialEditLogSeqNum/d1225173a8111dc482bbbcd025ab1da3/recovered.edits/0000000000000000003-wal.1733743239984.temp to hdfs://localhost:40493/hbase/data/default/testSequentialEditLogSeqNum/d1225173a8111dc482bbbcd025ab1da3/recovered.edits/0000000000000000020 2024-12-09T11:20:40,274 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 17 edits across 1 Regions in 75 ms; skipped=2; WAL=hdfs://localhost:40493/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733743239975/wal.1733743239984, size=1.8 K, length=1844, corrupted=false, cancelled=false 2024-12-09T11:20:40,274 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:40493/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733743239975/wal.1733743239984, journal: Splitting hdfs://localhost:40493/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733743239975/wal.1733743239984, size=1.8 K (1844bytes) at 1733743240187Finishing writing output for hdfs://localhost:40493/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733743239975/wal.1733743239984 so closing down at 1733743240201 (+14 ms)Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testSequentialEditLogSeqNum/d1225173a8111dc482bbbcd025ab1da3/recovered.edits/0000000000000000003-wal.1733743239984.temp at 1733743240204 (+3 ms)3 split writer threads finished at 1733743240204Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testSequentialEditLogSeqNum/d1225173a8111dc482bbbcd025ab1da3/recovered.edits/0000000000000000003-wal.1733743239984.temp (wrote 15 edits, skipped 0 edits in 0 ms) at 1733743240271 (+67 ms)Rename recovered edits hdfs://localhost:40493/hbase/data/default/testSequentialEditLogSeqNum/d1225173a8111dc482bbbcd025ab1da3/recovered.edits/0000000000000000003-wal.1733743239984.temp to hdfs://localhost:40493/hbase/data/default/testSequentialEditLogSeqNum/d1225173a8111dc482bbbcd025ab1da3/recovered.edits/0000000000000000020 at 1733743240273 (+2 ms)Processed 17 edits across 1 Regions in 75 ms; skipped=2; WAL=hdfs://localhost:40493/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733743239975/wal.1733743239984, size=1.8 K, length=1844, corrupted=false, cancelled=false at 1733743240274 (+1 ms) 2024-12-09T11:20:40,295 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testSequentialEditLogSeqNum Thread=405 (was 400) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:58778 [Waiting for operation #16] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:40708 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:58740 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=955 (was 917) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=337 (was 337), ProcessCount=11 (was 11), AvailableMemoryMB=944 (was 1028) 2024-12-09T11:20:40,308 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testRegionMadeOfBulkLoadedFilesOnly Thread=405, OpenFileDescriptor=955, MaxFileDescriptor=1048576, SystemLoadAverage=337, ProcessCount=11, AvailableMemoryMB=944 2024-12-09T11:20:40,333 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:40,335 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:40,371 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T11:20:40,375 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-53826960, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/hregion-53826960, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:20:40,392 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-53826960/hregion-53826960.1733743240375, exclude list is [], retry=0 2024-12-09T11:20:40,395 DEBUG [AsyncFSWAL-17-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:40,396 DEBUG [AsyncFSWAL-17-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:40,397 DEBUG [AsyncFSWAL-17-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:40,402 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-53826960/hregion-53826960.1733743240375 
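The entries above show the AsyncFSWAL provider opening a new writer: the output stream fans out to three datanodes (the SASL handshake is skipped because the cluster is unsecured) and the new WAL is registered under /hbase/WALs/hregion-53826960. As the paths in this log suggest, the file name is simply the configured prefix plus the creation timestamp in epoch milliseconds, which is why wal.1733743239984 and hregion-53826960.1733743240375 sort chronologically; archived files later keep the same name under /hbase/oldWALs. A minimal Java sketch of that naming pattern follows; the walFileName helper is purely illustrative and not an HBase API.

// Hypothetical helper illustrating the WAL file naming pattern visible in this log:
// prefix "hregion-53826960" + "." + creation time 1733743240375
// yields "hregion-53826960.1733743240375" under the WAL directory.
public final class WalNaming {
    static String walFileName(String prefix, long creationTimeMillis) {
        return prefix + "." + creationTimeMillis;
    }

    public static void main(String[] args) {
        // Matches the file created above: /hbase/WALs/hregion-53826960/hregion-53826960.1733743240375
        System.out.println(walFileName("hregion-53826960", 1733743240375L));
        // Matches the manual test WAL: .../testsequentialeditlogseqnum-manual,16010,1733743239975/wal.1733743239984
        System.out.println(walFileName("wal", 1733743239984L));
    }
}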
2024-12-09T11:20:40,405 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39935:39935),(127.0.0.1/127.0.0.1:39985:39985)] 2024-12-09T11:20:40,406 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 066a91e76bad8245b090aafc7266d84d, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1733743240334.066a91e76bad8245b090aafc7266d84d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testRegionMadeOfBulkLoadedFilesOnly', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40493/hbase 2024-12-09T11:20:40,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741879_1056 (size=70) 2024-12-09T11:20:40,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741879_1056 (size=70) 2024-12-09T11:20:40,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741879_1056 (size=70) 2024-12-09T11:20:40,420 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1733743240334.066a91e76bad8245b090aafc7266d84d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:40,424 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:40,426 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 066a91e76bad8245b090aafc7266d84d columnFamilyName a 2024-12-09T11:20:40,426 DEBUG 
[StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:40,426 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] regionserver.HStore(327): Store=066a91e76bad8245b090aafc7266d84d/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:40,427 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:40,429 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 066a91e76bad8245b090aafc7266d84d columnFamilyName b 2024-12-09T11:20:40,429 DEBUG [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:40,429 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] regionserver.HStore(327): Store=066a91e76bad8245b090aafc7266d84d/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:40,430 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:40,431 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 066a91e76bad8245b090aafc7266d84d columnFamilyName c 2024-12-09T11:20:40,431 DEBUG [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:40,432 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] regionserver.HStore(327): Store=066a91e76bad8245b090aafc7266d84d/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:40,432 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:40,433 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:40,433 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:40,434 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:40,434 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:40,435 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-09T11:20:40,436 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:40,439 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:20:40,439 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 066a91e76bad8245b090aafc7266d84d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63386165, jitterRate=-0.055472537875175476}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-09T11:20:40,440 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 066a91e76bad8245b090aafc7266d84d: Writing region info on filesystem at 1733743240420Initializing all the Stores at 1733743240422 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743240422Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743240423 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743240423Cleaning up temporary data from old regions at 1733743240434 (+11 ms)Region opened successfully at 1733743240440 (+6 ms) 2024-12-09T11:20:40,440 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 066a91e76bad8245b090aafc7266d84d, disabling compactions & flushes 2024-12-09T11:20:40,440 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1733743240334.066a91e76bad8245b090aafc7266d84d. 2024-12-09T11:20:40,440 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733743240334.066a91e76bad8245b090aafc7266d84d. 2024-12-09T11:20:40,440 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733743240334.066a91e76bad8245b090aafc7266d84d. after waiting 0 ms 2024-12-09T11:20:40,440 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1733743240334.066a91e76bad8245b090aafc7266d84d. 2024-12-09T11:20:40,443 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1733743240334.066a91e76bad8245b090aafc7266d84d. 2024-12-09T11:20:40,443 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 066a91e76bad8245b090aafc7266d84d: Waiting for close lock at 1733743240440Disabling compacts and flushes for region at 1733743240440Disabling writes for close at 1733743240440Writing region close event to WAL at 1733743240442 (+2 ms)Closed at 1733743240442 2024-12-09T11:20:40,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741878_1055 (size=95) 2024-12-09T11:20:40,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741878_1055 (size=95) 2024-12-09T11:20:40,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741878_1055 (size=95) 2024-12-09T11:20:40,449 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-09T11:20:40,449 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-53826960:(num 1733743240375) 2024-12-09T11:20:40,450 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-09T11:20:40,452 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:20:40,471 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743240452, exclude list is [], retry=0 2024-12-09T11:20:40,477 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:40,477 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping 
handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:40,477 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:40,499 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743240452 2024-12-09T11:20:40,502 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39935:39935),(127.0.0.1/127.0.0.1:39985:39985)] 2024-12-09T11:20:40,503 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 066a91e76bad8245b090aafc7266d84d, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1733743240334.066a91e76bad8245b090aafc7266d84d.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:20:40,503 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1733743240334.066a91e76bad8245b090aafc7266d84d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:40,503 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:40,503 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:40,505 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:40,506 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 066a91e76bad8245b090aafc7266d84d columnFamilyName a 2024-12-09T11:20:40,506 DEBUG [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:40,506 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] regionserver.HStore(327): Store=066a91e76bad8245b090aafc7266d84d/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:40,507 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:40,507 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 066a91e76bad8245b090aafc7266d84d columnFamilyName b 2024-12-09T11:20:40,507 DEBUG [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:40,508 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] regionserver.HStore(327): Store=066a91e76bad8245b090aafc7266d84d/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:40,508 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:40,509 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 066a91e76bad8245b090aafc7266d84d columnFamilyName c 2024-12-09T11:20:40,509 DEBUG [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:40,510 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] regionserver.HStore(327): Store=066a91e76bad8245b090aafc7266d84d/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:40,510 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:40,510 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:40,512 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:40,513 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:40,513 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:40,514 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-09T11:20:40,517 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:40,519 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 066a91e76bad8245b090aafc7266d84d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64458932, jitterRate=-0.03948706388473511}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-09T11:20:40,520 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 066a91e76bad8245b090aafc7266d84d: Writing region info on filesystem at 1733743240503Initializing all the Stores at 1733743240504 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743240504Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743240504Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743240505 (+1 ms)Cleaning up temporary data from old regions at 1733743240513 (+8 ms)Region opened successfully at 1733743240520 (+7 ms) 2024-12-09T11:20:40,529 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile is 28, key is \x0D/a:a/1733743240527/Put/seqid=0 2024-12-09T11:20:40,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741881_1058 (size=4826) 2024-12-09T11:20:40,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741881_1058 (size=4826) 2024-12-09T11:20:40,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:34459 is added to blk_1073741881_1058 (size=4826) 2024-12-09T11:20:40,552 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:40493/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile for inclusion in 066a91e76bad8245b090aafc7266d84d/a 2024-12-09T11:20:40,564 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first= last=z 2024-12-09T11:20:40,565 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-09T11:20:40,565 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 066a91e76bad8245b090aafc7266d84d: 2024-12-09T11:20:40,568 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile as hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/a/9979f7414d9f4caeb4b850e98bfe1bd5_SeqId_3_ 2024-12-09T11:20:40,570 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:40493/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into 066a91e76bad8245b090aafc7266d84d/a as hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/a/9979f7414d9f4caeb4b850e98bfe1bd5_SeqId_3_ - updating store file list. 2024-12-09T11:20:40,580 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 9979f7414d9f4caeb4b850e98bfe1bd5_SeqId_3_: NONE, but ROW specified in column family configuration 2024-12-09T11:20:40,580 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/a/9979f7414d9f4caeb4b850e98bfe1bd5_SeqId_3_ into 066a91e76bad8245b090aafc7266d84d/a 2024-12-09T11:20:40,580 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:40493/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into 066a91e76bad8245b090aafc7266d84d/a (new location: hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/a/9979f7414d9f4caeb4b850e98bfe1bd5_SeqId_3_) 2024-12-09T11:20:40,632 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:40493/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743240452, size=0 (0bytes) 2024-12-09T11:20:40,632 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:40493/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743240452 might be still open, length is 0 2024-12-09T11:20:40,632 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40493/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743240452 2024-12-09T11:20:40,633 WARN [IPC Server handler 0 on default port 40493 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743240452 has not been closed. Lease recovery is in progress. 
RecoveryId = 1059 for block blk_1073741880_1057 2024-12-09T11:20:40,633 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40493/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743240452 after 1ms 2024-12-09T11:20:41,416 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:44870 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:44093:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44870 dst: /127.0.0.1:44093 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:44093 remote=/127.0.0.1:44870]. Total timeout mills is 60000, 59171 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:20:41,416 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:40810 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:46359:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40810 dst: /127.0.0.1:46359 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:20:41,416 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:44164 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:34459:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44164 dst: /127.0.0.1:34459 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
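The write-pipeline errors above are the expected fallout of splitting a WAL whose writer was never closed: WALSplitter reports the file "might be still open, length is 0", asks the NameNode to recover the lease, and the first attempt (attempt=0) fails while the datanodes tear down the half-open block (interrupted I/O on one replica, premature EOF on the others). Recovery is then retried until the NameNode finalizes the block, which the next entries record as "Recovered lease, attempt=1 ... after 4002ms". A simplified retry loop in that spirit is sketched below, assuming only the standard DistributedFileSystem.recoverLease call; this is an illustrative stand-in, not the actual RecoverLeaseFSUtils logic, and the attempt/pause parameters are arbitrary.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Simplified stand-in for the recover-lease-with-retry behaviour visible in this log.
public final class LeaseRecoverySketch {
    static boolean recoverLeaseWithRetries(DistributedFileSystem dfs, Path walFile,
                                           int maxAttempts, long pauseMillis)
            throws IOException, InterruptedException {
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            // recoverLease returns true once the NameNode has closed the file and
            // finalized the last block's length; false means recovery is still in progress.
            if (dfs.recoverLease(walFile)) {
                return true;              // e.g. "Recovered lease, attempt=1 ... after 4002ms"
            }
            Thread.sleep(pauseMillis);    // wait before re-checking, as the ~4 s gap in the log shows
        }
        return false;
    }
}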
2024-12-09T11:20:41,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741880_1059 (size=473) 2024-12-09T11:20:41,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741880_1059 (size=473) 2024-12-09T11:20:44,634 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40493/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743240452 after 4002ms 2024-12-09T11:20:44,638 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743240452: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:44,638 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:40493/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743240452 took 4006ms 2024-12-09T11:20:44,643 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:40493/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743240452; continuing. 2024-12-09T11:20:44,643 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:40493/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743240452 so closing down 2024-12-09T11:20:44,643 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-09T11:20:44,645 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000005-wal.1733743240452.temp 2024-12-09T11:20:44,647 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/recovered.edits/0000000000000000005-wal.1733743240452.temp 2024-12-09T11:20:44,647 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-09T11:20:44,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741882_1060 (size=259) 2024-12-09T11:20:44,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741882_1060 (size=259) 2024-12-09T11:20:44,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741882_1060 (size=259) 2024-12-09T11:20:44,663 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/recovered.edits/0000000000000000005-wal.1733743240452.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-09T11:20:44,665 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/recovered.edits/0000000000000000005-wal.1733743240452.temp to 
hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/recovered.edits/0000000000000000005 2024-12-09T11:20:44,665 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 24 ms; skipped=1; WAL=hdfs://localhost:40493/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743240452, size=0, length=0, corrupted=false, cancelled=false 2024-12-09T11:20:44,665 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:40493/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743240452, journal: Splitting hdfs://localhost:40493/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743240452, size=0 (0bytes) at 1733743240632Finishing writing output for hdfs://localhost:40493/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743240452 so closing down at 1733743244643 (+4011 ms)Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/recovered.edits/0000000000000000005-wal.1733743240452.temp at 1733743244647 (+4 ms)3 split writer threads finished at 1733743244647Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/recovered.edits/0000000000000000005-wal.1733743240452.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733743244663 (+16 ms)Rename recovered edits hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/recovered.edits/0000000000000000005-wal.1733743240452.temp to hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/recovered.edits/0000000000000000005 at 1733743244665 (+2 ms)Processed 2 edits across 1 Regions in 24 ms; skipped=1; WAL=hdfs://localhost:40493/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743240452, size=0, length=0, corrupted=false, cancelled=false at 1733743244665 2024-12-09T11:20:44,667 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:40493/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743240452 to hdfs://localhost:40493/hbase/oldWALs/wal.1733743240452 2024-12-09T11:20:44,668 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/recovered.edits/0000000000000000005 2024-12-09T11:20:44,668 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-09T11:20:44,670 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:20:44,687 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743244671, exclude list is [], retry=0 2024-12-09T11:20:44,693 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, 
datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:44,693 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:44,693 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:44,703 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743244671 2024-12-09T11:20:44,703 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39935:39935),(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39985:39985)] 2024-12-09T11:20:44,704 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 066a91e76bad8245b090aafc7266d84d, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1733743240334.066a91e76bad8245b090aafc7266d84d.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:20:44,704 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1733743240334.066a91e76bad8245b090aafc7266d84d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:44,704 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:44,704 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:44,715 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:44,716 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 066a91e76bad8245b090aafc7266d84d columnFamilyName a 2024-12-09T11:20:44,716 DEBUG [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:44,725 DEBUG [StoreFileOpener-066a91e76bad8245b090aafc7266d84d-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 9979f7414d9f4caeb4b850e98bfe1bd5_SeqId_3_: NONE, but ROW specified in 
column family configuration 2024-12-09T11:20:44,725 DEBUG [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/a/9979f7414d9f4caeb4b850e98bfe1bd5_SeqId_3_ 2024-12-09T11:20:44,725 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] regionserver.HStore(327): Store=066a91e76bad8245b090aafc7266d84d/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:44,725 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:44,727 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 066a91e76bad8245b090aafc7266d84d columnFamilyName b 2024-12-09T11:20:44,727 DEBUG [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:44,727 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] regionserver.HStore(327): Store=066a91e76bad8245b090aafc7266d84d/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:44,728 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:44,729 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 066a91e76bad8245b090aafc7266d84d columnFamilyName c 2024-12-09T11:20:44,729 DEBUG [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:44,730 INFO [StoreOpener-066a91e76bad8245b090aafc7266d84d-1 {}] regionserver.HStore(327): Store=066a91e76bad8245b090aafc7266d84d/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:44,730 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:44,731 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:44,733 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:44,734 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/recovered.edits/0000000000000000005 2024-12-09T11:20:44,738 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/recovered.edits/0000000000000000005: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:44,739 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 1, skipped 0, firstSequenceIdInLog=5, maxSequenceIdInLog=5, path=hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/recovered.edits/0000000000000000005 2024-12-09T11:20:44,739 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 066a91e76bad8245b090aafc7266d84d 3/3 column families, dataSize=58 B heapSize=904 B 2024-12-09T11:20:44,761 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/.tmp/a/d436485f36c94444a3cc9005106c3b83 is 62, key is testRegionMadeOfBulkLoadedFilesOnly/a:a/1733743240585/Put/seqid=0 2024-12-09T11:20:44,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741884_1062 (size=5149) 2024-12-09T11:20:44,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741884_1062 (size=5149) 2024-12-09T11:20:44,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741884_1062 (size=5149) 2024-12-09T11:20:44,778 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/.tmp/a/d436485f36c94444a3cc9005106c3b83 2024-12-09T11:20:44,785 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/.tmp/a/d436485f36c94444a3cc9005106c3b83 as hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/a/d436485f36c94444a3cc9005106c3b83 2024-12-09T11:20:44,791 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/a/d436485f36c94444a3cc9005106c3b83, entries=1, sequenceid=5, filesize=5.0 K 2024-12-09T11:20:44,791 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for 066a91e76bad8245b090aafc7266d84d in 52ms, sequenceid=5, compaction requested=false; wal=null 2024-12-09T11:20:44,792 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/recovered.edits/0000000000000000005 2024-12-09T11:20:44,794 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:44,794 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:44,795 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-09T11:20:44,797 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 066a91e76bad8245b090aafc7266d84d 2024-12-09T11:20:44,802 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/066a91e76bad8245b090aafc7266d84d/recovered.edits/5.seqid, newMaxSeqId=5, maxSeqId=1 2024-12-09T11:20:44,804 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 066a91e76bad8245b090aafc7266d84d; next sequenceid=6; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74449060, jitterRate=0.10937744379043579}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-09T11:20:44,804 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 066a91e76bad8245b090aafc7266d84d: Writing region info on filesystem at 1733743244704Initializing all the Stores at 1733743244707 (+3 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743244707Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743244714 (+7 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743244714Obtaining lock to block concurrent updates at 1733743244739 (+25 ms)Preparing flush snapshotting stores in 066a91e76bad8245b090aafc7266d84d at 1733743244739Finished memstore snapshotting testRegionMadeOfBulkLoadedFilesOnly,,1733743240334.066a91e76bad8245b090aafc7266d84d., syncing WAL and waiting on mvcc, flushsize=dataSize=58, getHeapSize=856, getOffHeapSize=0, getCellsCount=1 at 1733743244740 (+1 ms)Flushing stores of testRegionMadeOfBulkLoadedFilesOnly,,1733743240334.066a91e76bad8245b090aafc7266d84d. at 1733743244740Flushing 066a91e76bad8245b090aafc7266d84d/a: creating writer at 1733743244740Flushing 066a91e76bad8245b090aafc7266d84d/a: appending metadata at 1733743244760 (+20 ms)Flushing 066a91e76bad8245b090aafc7266d84d/a: closing flushed file at 1733743244760Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@53cb537e: reopening flushed file at 1733743244784 (+24 ms)Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for 066a91e76bad8245b090aafc7266d84d in 52ms, sequenceid=5, compaction requested=false; wal=null at 1733743244791 (+7 ms)Cleaning up temporary data from old regions at 1733743244794 (+3 ms)Region opened successfully at 1733743244804 (+10 ms) 2024-12-09T11:20:44,808 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 066a91e76bad8245b090aafc7266d84d, disabling compactions & flushes 2024-12-09T11:20:44,808 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1733743240334.066a91e76bad8245b090aafc7266d84d. 2024-12-09T11:20:44,808 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733743240334.066a91e76bad8245b090aafc7266d84d. 2024-12-09T11:20:44,808 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733743240334.066a91e76bad8245b090aafc7266d84d. after waiting 0 ms 2024-12-09T11:20:44,808 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1733743240334.066a91e76bad8245b090aafc7266d84d. 2024-12-09T11:20:44,809 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1733743240334.066a91e76bad8245b090aafc7266d84d. 
2024-12-09T11:20:44,809 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 066a91e76bad8245b090aafc7266d84d: Waiting for close lock at 1733743244808Disabling compacts and flushes for region at 1733743244808Disabling writes for close at 1733743244808Writing region close event to WAL at 1733743244809 (+1 ms)Closed at 1733743244809 2024-12-09T11:20:44,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741883_1061 (size=95) 2024-12-09T11:20:44,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741883_1061 (size=95) 2024-12-09T11:20:44,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741883_1061 (size=95) 2024-12-09T11:20:44,815 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-09T11:20:44,815 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733743244671) 2024-12-09T11:20:44,829 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testRegionMadeOfBulkLoadedFilesOnly Thread=408 (was 405) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_25280338_22 at /127.0.0.1:44920 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_25280338_22 at /127.0.0.1:40838 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_25280338_22 at /127.0.0.1:44190 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestRegionMadeOfBulkLoadedFilesOnly@localhost:40493 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:40493 from jenkinstestRegionMadeOfBulkLoadedFilesOnly java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=1013 (was 955) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=374 (was 337) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=719 (was 944) 2024-12-09T11:20:44,844 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterRegionMovedWithMultiCF Thread=408, OpenFileDescriptor=1013, MaxFileDescriptor=1048576, SystemLoadAverage=374, ProcessCount=11, AvailableMemoryMB=718 2024-12-09T11:20:44,863 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:44,868 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T11:20:44,873 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 2dff3a36d44f,42781,1733743227566 2024-12-09T11:20:44,877 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5854061f 2024-12-09T11:20:44,878 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T11:20:44,881 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53594, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T11:20:44,886 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42781 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:20:44,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42781 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF 2024-12-09T11:20:44,898 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): 
pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T11:20:44,901 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42781 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testReplayEditsAfterRegionMovedWithMultiCF" procId is: 4 2024-12-09T11:20:44,901 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:44,904 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T11:20:44,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42781 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T11:20:44,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741885_1063 (size=694) 2024-12-09T11:20:44,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741885_1063 (size=694) 2024-12-09T11:20:44,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741885_1063 (size=694) 2024-12-09T11:20:44,952 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4dc5ca2e3dd0f6286d8d8a4977d489a3, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de 2024-12-09T11:20:44,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741886_1064 (size=77) 2024-12-09T11:20:44,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741886_1064 (size=77) 2024-12-09T11:20:44,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741886_1064 (size=77) 2024-12-09T11:20:44,997 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(898): Instantiated 
testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:44,997 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1722): Closing 4dc5ca2e3dd0f6286d8d8a4977d489a3, disabling compactions & flushes 2024-12-09T11:20:44,997 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:44,997 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:44,997 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. after waiting 0 ms 2024-12-09T11:20:44,997 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:44,997 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:44,997 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4dc5ca2e3dd0f6286d8d8a4977d489a3: Waiting for close lock at 1733743244997Disabling compacts and flushes for region at 1733743244997Disabling writes for close at 1733743244997Writing region close event to WAL at 1733743244997Closed at 1733743244997 2024-12-09T11:20:44,999 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T11:20:45,005 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.","families":{"info":[{"qualifier":"regioninfo","vlen":76,"tag":[],"timestamp":"1733743245000"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733743245000"}]},"ts":"1733743245000"} 2024-12-09T11:20:45,012 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-09T11:20:45,014 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T11:20:45,017 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733743245014"}]},"ts":"1733743245014"} 2024-12-09T11:20:45,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42781 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T11:20:45,022 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLING in hbase:meta 2024-12-09T11:20:45,023 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {2dff3a36d44f=0} racks are {/default-rack=0} 2024-12-09T11:20:45,025 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T11:20:45,025 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T11:20:45,025 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T11:20:45,025 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T11:20:45,025 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T11:20:45,025 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T11:20:45,025 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T11:20:45,025 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T11:20:45,025 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T11:20:45,025 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T11:20:45,027 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, ASSIGN}] 2024-12-09T11:20:45,028 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, ASSIGN 2024-12-09T11:20:45,031 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, ASSIGN; state=OFFLINE, location=2dff3a36d44f,39663,1733743228795; forceNewPlan=false, retain=false 2024-12-09T11:20:45,183 INFO [2dff3a36d44f:42781 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-09T11:20:45,184 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4dc5ca2e3dd0f6286d8d8a4977d489a3, regionState=OPENING, regionLocation=2dff3a36d44f,39663,1733743228795 2024-12-09T11:20:45,188 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, ASSIGN because future has completed 2024-12-09T11:20:45,191 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4dc5ca2e3dd0f6286d8d8a4977d489a3, server=2dff3a36d44f,39663,1733743228795}] 2024-12-09T11:20:45,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42781 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T11:20:45,352 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:45,352 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4dc5ca2e3dd0f6286d8d8a4977d489a3, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:20:45,353 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:45,353 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:45,354 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:45,354 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:45,359 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:45,361 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4dc5ca2e3dd0f6286d8d8a4977d489a3 columnFamilyName cf1 2024-12-09T11:20:45,361 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:45,361 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(327): Store=4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:45,361 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:45,363 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4dc5ca2e3dd0f6286d8d8a4977d489a3 columnFamilyName cf2 2024-12-09T11:20:45,363 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:45,364 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(327): Store=4dc5ca2e3dd0f6286d8d8a4977d489a3/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:45,364 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:45,365 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:45,365 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:45,366 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping 
wal replay for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:45,366 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:45,367 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-09T11:20:45,368 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:45,371 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:20:45,374 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4dc5ca2e3dd0f6286d8d8a4977d489a3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61877678, jitterRate=-0.07795074582099915}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-09T11:20:45,374 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:45,375 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4dc5ca2e3dd0f6286d8d8a4977d489a3: Running coprocessor pre-open hook at 1733743245354Writing region info on filesystem at 1733743245354Initializing all the Stores at 1733743245355 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743245355Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743245358 (+3 ms)Cleaning up temporary data from old regions at 1733743245366 (+8 ms)Running coprocessor post-open hooks at 1733743245374 (+8 ms)Region opened successfully at 1733743245375 (+1 ms) 2024-12-09T11:20:45,377 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., pid=6, masterSystemTime=1733743245345 2024-12-09T11:20:45,380 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): 
Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:45,380 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:45,381 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4dc5ca2e3dd0f6286d8d8a4977d489a3, regionState=OPEN, openSeqNum=2, regionLocation=2dff3a36d44f,39663,1733743228795 2024-12-09T11:20:45,385 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4dc5ca2e3dd0f6286d8d8a4977d489a3, server=2dff3a36d44f,39663,1733743228795 because future has completed 2024-12-09T11:20:45,390 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T11:20:45,391 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4dc5ca2e3dd0f6286d8d8a4977d489a3, server=2dff3a36d44f,39663,1733743228795 in 196 msec 2024-12-09T11:20:45,398 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T11:20:45,398 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, ASSIGN in 364 msec 2024-12-09T11:20:45,400 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T11:20:45,400 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733743245400"}]},"ts":"1733743245400"} 2024-12-09T11:20:45,404 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLED in hbase:meta 2024-12-09T11:20:45,406 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T11:20:45,409 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF in 518 msec 2024-12-09T11:20:45,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42781 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T11:20:45,543 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testReplayEditsAfterRegionMovedWithMultiCF completed 2024-12-09T11:20:45,543 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testReplayEditsAfterRegionMovedWithMultiCF get assigned. 
Timeout = 60000ms 2024-12-09T11:20:45,545 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T11:20:45,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned to meta. Checking AM states. 2024-12-09T11:20:45,552 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T11:20:45,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned. 2024-12-09T11:20:45,567 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,39663,1733743228795, seqNum=2] 2024-12-09T11:20:45,587 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42781 {}] master.HMaster(2410): Client=jenkins//172.17.0.3 move hri=4dc5ca2e3dd0f6286d8d8a4977d489a3, source=2dff3a36d44f,39663,1733743228795, destination=2dff3a36d44f,46259,1733743228656, warming up region on 2dff3a36d44f,46259,1733743228656 2024-12-09T11:20:45,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42781 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T11:20:45,591 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42781 {}] master.HMaster(2414): Client=jenkins//172.17.0.3 move hri=4dc5ca2e3dd0f6286d8d8a4977d489a3, source=2dff3a36d44f,39663,1733743228795, destination=2dff3a36d44f,46259,1733743228656, running balancer 2024-12-09T11:20:45,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42781 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, REOPEN/MOVE 2024-12-09T11:20:45,592 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, REOPEN/MOVE 2024-12-09T11:20:45,594 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=4dc5ca2e3dd0f6286d8d8a4977d489a3, regionState=CLOSING, regionLocation=2dff3a36d44f,39663,1733743228795 2024-12-09T11:20:45,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, REOPEN/MOVE because future has completed 2024-12-09T11:20:45,597 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58495, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T11:20:45,597 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T11:20:45,598 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; 
CloseRegionProcedure 4dc5ca2e3dd0f6286d8d8a4977d489a3, server=2dff3a36d44f,39663,1733743228795}] 2024-12-09T11:20:45,602 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46259 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:45,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46259 {}] regionserver.HRegion(7855): Warmup {ENCODED => 4dc5ca2e3dd0f6286d8d8a4977d489a3, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:20:45,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46259 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:45,603 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:45,605 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4dc5ca2e3dd0f6286d8d8a4977d489a3 columnFamilyName cf1 2024-12-09T11:20:45,605 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:45,605 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(327): Store=4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:45,605 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:45,606 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4dc5ca2e3dd0f6286d8d8a4977d489a3 columnFamilyName cf2 2024-12-09T11:20:45,606 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:45,609 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(327): Store=4dc5ca2e3dd0f6286d8d8a4977d489a3/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:45,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46259 {}] regionserver.HRegion(1722): Closing 4dc5ca2e3dd0f6286d8d8a4977d489a3, disabling compactions & flushes 2024-12-09T11:20:45,609 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46259 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:45,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46259 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:45,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46259 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. after waiting 0 ms 2024-12-09T11:20:45,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46259 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:45,610 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46259 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 
2024-12-09T11:20:45,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46259 {}] regionserver.HRegion(1676): Region close journal for 4dc5ca2e3dd0f6286d8d8a4977d489a3: Waiting for close lock at 1733743245609Disabling compacts and flushes for region at 1733743245609Disabling writes for close at 1733743245609Writing region close event to WAL at 1733743245610 (+1 ms)Closed at 1733743245610 2024-12-09T11:20:45,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42781 {}] procedure.ProcedureSyncWait(219): waitFor pid=7 2024-12-09T11:20:45,755 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(122): Close 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:45,755 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T11:20:45,756 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1722): Closing 4dc5ca2e3dd0f6286d8d8a4977d489a3, disabling compactions & flushes 2024-12-09T11:20:45,756 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:45,756 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:45,756 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. after waiting 0 ms 2024-12-09T11:20:45,756 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 
2024-12-09T11:20:45,756 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(2902): Flushing 4dc5ca2e3dd0f6286d8d8a4977d489a3 2/2 column families, dataSize=31 B heapSize=616 B 2024-12-09T11:20:45,775 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/.tmp/cf1/7aa6ba5a0b4a46afb779e4ec71507e19 is 35, key is r1/cf1:q/1733743245570/Put/seqid=0 2024-12-09T11:20:45,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741887_1065 (size=4783) 2024-12-09T11:20:45,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741887_1065 (size=4783) 2024-12-09T11:20:45,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741887_1065 (size=4783) 2024-12-09T11:20:46,187 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/.tmp/cf1/7aa6ba5a0b4a46afb779e4ec71507e19 2024-12-09T11:20:46,200 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/.tmp/cf1/7aa6ba5a0b4a46afb779e4ec71507e19 as hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1/7aa6ba5a0b4a46afb779e4ec71507e19 2024-12-09T11:20:46,209 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1/7aa6ba5a0b4a46afb779e4ec71507e19, entries=1, sequenceid=5, filesize=4.7 K 2024-12-09T11:20:46,211 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 4dc5ca2e3dd0f6286d8d8a4977d489a3 in 455ms, sequenceid=5, compaction requested=false 2024-12-09T11:20:46,211 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testReplayEditsAfterRegionMovedWithMultiCF' 2024-12-09T11:20:46,239 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/recovered.edits/8.seqid, 
newMaxSeqId=8, maxSeqId=1 2024-12-09T11:20:46,243 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:46,243 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1676): Region close journal for 4dc5ca2e3dd0f6286d8d8a4977d489a3: Waiting for close lock at 1733743245756Running coprocessor pre-close hooks at 1733743245756Disabling compacts and flushes for region at 1733743245756Disabling writes for close at 1733743245756Obtaining lock to block concurrent updates at 1733743245756Preparing flush snapshotting stores in 4dc5ca2e3dd0f6286d8d8a4977d489a3 at 1733743245756Finished memstore snapshotting testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., syncing WAL and waiting on mvcc, flushsize=dataSize=31, getHeapSize=584, getOffHeapSize=0, getCellsCount=1 at 1733743245757 (+1 ms)Flushing stores of testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. at 1733743245758 (+1 ms)Flushing 4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1: creating writer at 1733743245758Flushing 4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1: appending metadata at 1733743245775 (+17 ms)Flushing 4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1: closing flushed file at 1733743245775Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40895151: reopening flushed file at 1733743246199 (+424 ms)Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 4dc5ca2e3dd0f6286d8d8a4977d489a3 in 455ms, sequenceid=5, compaction requested=false at 1733743246211 (+12 ms)Writing region close event to WAL at 1733743246225 (+14 ms)Running coprocessor post-close hooks at 1733743246240 (+15 ms)Closed at 1733743246243 (+3 ms) 2024-12-09T11:20:46,244 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionServer(3302): Adding 4dc5ca2e3dd0f6286d8d8a4977d489a3 move to 2dff3a36d44f,46259,1733743228656 record at close sequenceid=5 2024-12-09T11:20:46,248 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(157): Closed 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:46,249 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=4dc5ca2e3dd0f6286d8d8a4977d489a3, regionState=CLOSED 2024-12-09T11:20:46,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4dc5ca2e3dd0f6286d8d8a4977d489a3, server=2dff3a36d44f,39663,1733743228795 because future has completed 2024-12-09T11:20:46,261 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-09T11:20:46,261 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; CloseRegionProcedure 4dc5ca2e3dd0f6286d8d8a4977d489a3, server=2dff3a36d44f,39663,1733743228795 in 657 msec 2024-12-09T11:20:46,262 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, REOPEN/MOVE; 
state=CLOSED, location=2dff3a36d44f,46259,1733743228656; forceNewPlan=false, retain=false 2024-12-09T11:20:46,413 INFO [2dff3a36d44f:42781 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-09T11:20:46,414 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=4dc5ca2e3dd0f6286d8d8a4977d489a3, regionState=OPENING, regionLocation=2dff3a36d44f,46259,1733743228656 2024-12-09T11:20:46,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, REOPEN/MOVE because future has completed 2024-12-09T11:20:46,418 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4dc5ca2e3dd0f6286d8d8a4977d489a3, server=2dff3a36d44f,46259,1733743228656}] 2024-12-09T11:20:46,577 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:46,578 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7752): Opening region: {ENCODED => 4dc5ca2e3dd0f6286d8d8a4977d489a3, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:20:46,578 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:46,578 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:46,578 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7794): checking encryption for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:46,579 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7797): checking classloading for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:46,590 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:46,592 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4dc5ca2e3dd0f6286d8d8a4977d489a3 columnFamilyName cf1 2024-12-09T11:20:46,592 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:46,601 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1/7aa6ba5a0b4a46afb779e4ec71507e19 2024-12-09T11:20:46,602 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(327): Store=4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:46,602 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:46,603 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4dc5ca2e3dd0f6286d8d8a4977d489a3 columnFamilyName cf2 2024-12-09T11:20:46,603 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:46,604 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(327): Store=4dc5ca2e3dd0f6286d8d8a4977d489a3/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:46,604 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1038): replaying wal for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:46,607 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:46,608 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:46,609 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1048): stopping wal replay for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:46,609 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1060): Cleaning up temporary data for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:46,610 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-09T11:20:46,611 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1093): writing seq id for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:46,612 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1114): Opened 4dc5ca2e3dd0f6286d8d8a4977d489a3; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63669581, jitterRate=-0.05124931037425995}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-09T11:20:46,612 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:46,613 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1006): Region open journal for 4dc5ca2e3dd0f6286d8d8a4977d489a3: Running coprocessor pre-open hook at 1733743246579Writing region info on filesystem at 1733743246579Initializing all the Stores at 1733743246580 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743246580Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743246590 (+10 ms)Cleaning up temporary data from old regions at 1733743246609 (+19 ms)Running coprocessor post-open hooks at 1733743246612 (+3 ms)Region opened successfully at 1733743246613 (+1 ms) 2024-12-09T11:20:46,615 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., pid=9, masterSystemTime=1733743246571 2024-12-09T11:20:46,618 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] 
regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:46,618 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:46,619 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=4dc5ca2e3dd0f6286d8d8a4977d489a3, regionState=OPEN, openSeqNum=9, regionLocation=2dff3a36d44f,46259,1733743228656 2024-12-09T11:20:46,622 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4dc5ca2e3dd0f6286d8d8a4977d489a3, server=2dff3a36d44f,46259,1733743228656 because future has completed 2024-12-09T11:20:46,627 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-12-09T11:20:46,627 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; OpenRegionProcedure 4dc5ca2e3dd0f6286d8d8a4977d489a3, server=2dff3a36d44f,46259,1733743228656 in 206 msec 2024-12-09T11:20:46,629 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, REOPEN/MOVE in 1.0360 sec 2024-12-09T11:20:46,650 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T11:20:46,653 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53548, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T11:20:46,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39663 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 139 connection: 172.17.0.3:45782 deadline: 1733743306658, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2dff3a36d44f port=46259 startCode=1733743228656. As of locationSeqNum=5. 2024-12-09T11:20:46,666 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,39663,1733743228795, seqNum=2 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,39663,1733743228795, seqNum=2, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2dff3a36d44f port=46259 startCode=1733743228656. As of locationSeqNum=5. 2024-12-09T11:20:46,666 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,39663,1733743228795, seqNum=2 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2dff3a36d44f port=46259 startCode=1733743228656. As of locationSeqNum=5. 
2024-12-09T11:20:46,666 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,39663,1733743228795, seqNum=2 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,46259,1733743228656, seqNum=5 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2dff3a36d44f port=46259 startCode=1733743228656. As of locationSeqNum=5. 2024-12-09T11:20:46,772 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:20:46,775 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53556, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:20:46,787 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 4dc5ca2e3dd0f6286d8d8a4977d489a3 2/2 column families, dataSize=50 B heapSize=720 B 2024-12-09T11:20:46,817 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/.tmp/cf1/ee515531e08749f1a12b70c9ba21a0c4 is 29, key is r1/cf1:/1733743246777/DeleteFamily/seqid=0 2024-12-09T11:20:46,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741888_1066 (size=4906) 2024-12-09T11:20:46,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741888_1066 (size=4906) 2024-12-09T11:20:46,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741888_1066 (size=4906) 2024-12-09T11:20:46,827 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), to=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/.tmp/cf1/ee515531e08749f1a12b70c9ba21a0c4 2024-12-09T11:20:46,834 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ee515531e08749f1a12b70c9ba21a0c4 2024-12-09T11:20:46,851 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/.tmp/cf2/0c719d53e0714166a0edeca8d7aae13e is 29, key is r1/cf2:/1733743246777/DeleteFamily/seqid=0 2024-12-09T11:20:46,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741889_1067 (size=4906) 2024-12-09T11:20:46,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741889_1067 (size=4906) 2024-12-09T11:20:46,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741889_1067 (size=4906) 
2024-12-09T11:20:46,861 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), to=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/.tmp/cf2/0c719d53e0714166a0edeca8d7aae13e 2024-12-09T11:20:46,868 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0c719d53e0714166a0edeca8d7aae13e 2024-12-09T11:20:46,869 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/.tmp/cf1/ee515531e08749f1a12b70c9ba21a0c4 as hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1/ee515531e08749f1a12b70c9ba21a0c4 2024-12-09T11:20:46,877 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ee515531e08749f1a12b70c9ba21a0c4 2024-12-09T11:20:46,877 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1/ee515531e08749f1a12b70c9ba21a0c4, entries=1, sequenceid=12, filesize=4.8 K 2024-12-09T11:20:46,878 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/.tmp/cf2/0c719d53e0714166a0edeca8d7aae13e as hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf2/0c719d53e0714166a0edeca8d7aae13e 2024-12-09T11:20:46,884 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0c719d53e0714166a0edeca8d7aae13e 2024-12-09T11:20:46,884 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf2/0c719d53e0714166a0edeca8d7aae13e, entries=1, sequenceid=12, filesize=4.8 K 2024-12-09T11:20:46,885 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~50 B/50, heapSize ~688 B/688, currentSize=0 B/0 for 4dc5ca2e3dd0f6286d8d8a4977d489a3 in 98ms, sequenceid=12, compaction requested=false 2024-12-09T11:20:46,885 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 4dc5ca2e3dd0f6286d8d8a4977d489a3: 2024-12-09T11:20:46,888 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-09T11:20:46,889 DEBUG [Time-limited test {}] regionserver.HStore(1541): 4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1 is initiating major compaction (all files) 2024-12-09T11:20:46,889 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher 
bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T11:20:46,889 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:46,890 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1 in testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:46,890 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1/7aa6ba5a0b4a46afb779e4ec71507e19, hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1/ee515531e08749f1a12b70c9ba21a0c4] into tmpdir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/.tmp, totalSize=9.5 K 2024-12-09T11:20:46,891 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 7aa6ba5a0b4a46afb779e4ec71507e19, keycount=1, bloomtype=NONE, size=4.7 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733743245570 2024-12-09T11:20:46,892 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting ee515531e08749f1a12b70c9ba21a0c4, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-12-09T11:20:46,907 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 4dc5ca2e3dd0f6286d8d8a4977d489a3#cf1#compaction#16 average throughput is NaN MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T11:20:46,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741890_1068 (size=4626) 2024-12-09T11:20:46,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741890_1068 (size=4626) 2024-12-09T11:20:46,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741890_1068 (size=4626) 2024-12-09T11:20:46,940 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/.tmp/cf1/a20bec56cc5d45e9aad02ceb155c838b as hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1/a20bec56cc5d45e9aad02ceb155c838b 2024-12-09T11:20:46,956 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 2 (all) file(s) in 4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1 of 4dc5ca2e3dd0f6286d8d8a4977d489a3 into a20bec56cc5d45e9aad02ceb155c838b(size=4.5 K), total size for store is 4.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T11:20:46,957 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 4dc5ca2e3dd0f6286d8d8a4977d489a3: 2024-12-09T11:20:46,957 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-09T11:20:46,957 DEBUG [Time-limited test {}] regionserver.HStore(1541): 4dc5ca2e3dd0f6286d8d8a4977d489a3/cf2 is initiating major compaction (all files) 2024-12-09T11:20:46,957 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T11:20:46,957 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:20:46,957 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 4dc5ca2e3dd0f6286d8d8a4977d489a3/cf2 in testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:46,957 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf2/0c719d53e0714166a0edeca8d7aae13e] into tmpdir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/.tmp, totalSize=4.8 K 2024-12-09T11:20:46,958 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 0c719d53e0714166a0edeca8d7aae13e, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-12-09T11:20:46,967 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 4dc5ca2e3dd0f6286d8d8a4977d489a3#cf2#compaction#17 average throughput is NaN MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T11:20:46,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741891_1069 (size=4592) 2024-12-09T11:20:46,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741891_1069 (size=4592) 2024-12-09T11:20:46,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741891_1069 (size=4592) 2024-12-09T11:20:46,989 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/.tmp/cf2/f3160b05ac3549b08bbc5a8660ef2009 as hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf2/f3160b05ac3549b08bbc5a8660ef2009 2024-12-09T11:20:46,998 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 1 (all) file(s) in 4dc5ca2e3dd0f6286d8d8a4977d489a3/cf2 of 4dc5ca2e3dd0f6286d8d8a4977d489a3 into f3160b05ac3549b08bbc5a8660ef2009(size=4.5 K), total size for store is 4.5 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T11:20:46,998 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 4dc5ca2e3dd0f6286d8d8a4977d489a3: 2024-12-09T11:20:47,005 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42781 {}] master.HMaster(2410): Client=jenkins//172.17.0.3 move hri=4dc5ca2e3dd0f6286d8d8a4977d489a3, source=2dff3a36d44f,46259,1733743228656, destination=2dff3a36d44f,39663,1733743228795, warming up region on 2dff3a36d44f,39663,1733743228795 2024-12-09T11:20:47,005 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42781 {}] master.HMaster(2414): Client=jenkins//172.17.0.3 move hri=4dc5ca2e3dd0f6286d8d8a4977d489a3, source=2dff3a36d44f,46259,1733743228656, destination=2dff3a36d44f,39663,1733743228795, running balancer 2024-12-09T11:20:47,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42781 {}] procedure2.ProcedureExecutor(1139): Stored pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, REOPEN/MOVE 2024-12-09T11:20:47,007 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, REOPEN/MOVE 2024-12-09T11:20:47,009 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39663 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:47,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39663 {}] regionserver.HRegion(7855): Warmup {ENCODED => 4dc5ca2e3dd0f6286d8d8a4977d489a3, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:20:47,009 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=4dc5ca2e3dd0f6286d8d8a4977d489a3, regionState=CLOSING, regionLocation=2dff3a36d44f,46259,1733743228656 2024-12-09T11:20:47,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39663 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:47,010 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42781 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=2dff3a36d44f,46259,1733743228656, table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-09T11:20:47,019 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:47,019 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, REOPEN/MOVE because future has completed 2024-12-09T11:20:47,020 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T11:20:47,020 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4dc5ca2e3dd0f6286d8d8a4977d489a3 columnFamilyName cf1 2024-12-09T11:20:47,020 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4dc5ca2e3dd0f6286d8d8a4977d489a3, server=2dff3a36d44f,46259,1733743228656}] 2024-12-09T11:20:47,020 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:47,037 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1/7aa6ba5a0b4a46afb779e4ec71507e19 2024-12-09T11:20:47,044 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1/a20bec56cc5d45e9aad02ceb155c838b 2024-12-09T11:20:47,049 INFO [StoreFileOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-cf1-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ee515531e08749f1a12b70c9ba21a0c4 2024-12-09T11:20:47,050 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1/ee515531e08749f1a12b70c9ba21a0c4 2024-12-09T11:20:47,050 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(327): Store=4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1, 
memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:47,050 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:47,051 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4dc5ca2e3dd0f6286d8d8a4977d489a3 columnFamilyName cf2 2024-12-09T11:20:47,051 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:47,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42781 {}] procedure.ProcedureSyncWait(219): waitFor pid=10 2024-12-09T11:20:47,065 INFO [StoreFileOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-cf2-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0c719d53e0714166a0edeca8d7aae13e 2024-12-09T11:20:47,065 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf2/0c719d53e0714166a0edeca8d7aae13e 2024-12-09T11:20:47,071 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf2/f3160b05ac3549b08bbc5a8660ef2009 2024-12-09T11:20:47,071 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(327): Store=4dc5ca2e3dd0f6286d8d8a4977d489a3/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:47,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39663 {}] regionserver.HRegion(1722): Closing 4dc5ca2e3dd0f6286d8d8a4977d489a3, disabling compactions & flushes 2024-12-09T11:20:47,072 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39663 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:47,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39663 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 
2024-12-09T11:20:47,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39663 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. after waiting 0 ms 2024-12-09T11:20:47,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39663 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:47,079 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39663 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:47,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39663 {}] regionserver.HRegion(1676): Region close journal for 4dc5ca2e3dd0f6286d8d8a4977d489a3: Waiting for close lock at 1733743247071Disabling compacts and flushes for region at 1733743247071Disabling writes for close at 1733743247072 (+1 ms)Writing region close event to WAL at 1733743247079 (+7 ms)Closed at 1733743247079 2024-12-09T11:20:47,176 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(122): Close 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:47,176 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T11:20:47,176 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1722): Closing 4dc5ca2e3dd0f6286d8d8a4977d489a3, disabling compactions & flushes 2024-12-09T11:20:47,176 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:47,176 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:47,176 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. after waiting 0 ms 2024-12-09T11:20:47,176 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 
2024-12-09T11:20:47,177 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1/7aa6ba5a0b4a46afb779e4ec71507e19, hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1/ee515531e08749f1a12b70c9ba21a0c4] to archive 2024-12-09T11:20:47,180 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T11:20:47,184 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1/7aa6ba5a0b4a46afb779e4ec71507e19 to hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1/7aa6ba5a0b4a46afb779e4ec71507e19 2024-12-09T11:20:47,186 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1/ee515531e08749f1a12b70c9ba21a0c4 to hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1/ee515531e08749f1a12b70c9ba21a0c4 2024-12-09T11:20:47,203 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf2/0c719d53e0714166a0edeca8d7aae13e] to archive 2024-12-09T11:20:47,205 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-09T11:20:47,209 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf2/0c719d53e0714166a0edeca8d7aae13e to hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf2/0c719d53e0714166a0edeca8d7aae13e 2024-12-09T11:20:47,220 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=8 2024-12-09T11:20:47,221 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:47,221 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1676): Region close journal for 4dc5ca2e3dd0f6286d8d8a4977d489a3: Waiting for close lock at 1733743247176Running coprocessor pre-close hooks at 1733743247176Disabling compacts and flushes for region at 1733743247176Disabling writes for close at 1733743247176Writing region close event to WAL at 1733743247215 (+39 ms)Running coprocessor post-close hooks at 1733743247221 (+6 ms)Closed at 1733743247221 2024-12-09T11:20:47,221 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegionServer(3302): Adding 4dc5ca2e3dd0f6286d8d8a4977d489a3 move to 2dff3a36d44f,39663,1733743228795 record at close sequenceid=12 2024-12-09T11:20:47,224 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(157): Closed 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:47,226 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=4dc5ca2e3dd0f6286d8d8a4977d489a3, regionState=CLOSED 2024-12-09T11:20:47,229 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4dc5ca2e3dd0f6286d8d8a4977d489a3, server=2dff3a36d44f,46259,1733743228656 because future has completed 2024-12-09T11:20:47,234 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=10 2024-12-09T11:20:47,234 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=10, state=SUCCESS, hasLock=false; CloseRegionProcedure 4dc5ca2e3dd0f6286d8d8a4977d489a3, server=2dff3a36d44f,46259,1733743228656 in 211 msec 2024-12-09T11:20:47,235 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, REOPEN/MOVE; state=CLOSED, location=2dff3a36d44f,39663,1733743228795; forceNewPlan=false, retain=false 2024-12-09T11:20:47,386 INFO 
[2dff3a36d44f:42781 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-09T11:20:47,386 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=4dc5ca2e3dd0f6286d8d8a4977d489a3, regionState=OPENING, regionLocation=2dff3a36d44f,39663,1733743228795 2024-12-09T11:20:47,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, REOPEN/MOVE because future has completed 2024-12-09T11:20:47,391 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4dc5ca2e3dd0f6286d8d8a4977d489a3, server=2dff3a36d44f,39663,1733743228795}] 2024-12-09T11:20:47,548 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:47,549 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 4dc5ca2e3dd0f6286d8d8a4977d489a3, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:20:47,549 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:47,549 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:47,549 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:47,549 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:47,551 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:47,552 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, 
single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4dc5ca2e3dd0f6286d8d8a4977d489a3 columnFamilyName cf1 2024-12-09T11:20:47,552 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:47,559 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1/a20bec56cc5d45e9aad02ceb155c838b 2024-12-09T11:20:47,559 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(327): Store=4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:47,560 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:47,561 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4dc5ca2e3dd0f6286d8d8a4977d489a3 columnFamilyName cf2 2024-12-09T11:20:47,561 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:47,569 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf2/f3160b05ac3549b08bbc5a8660ef2009 2024-12-09T11:20:47,569 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(327): Store=4dc5ca2e3dd0f6286d8d8a4977d489a3/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:47,569 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:47,570 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:47,571 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:47,573 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:47,573 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:47,574 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-09T11:20:47,576 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:47,577 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 4dc5ca2e3dd0f6286d8d8a4977d489a3; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63010182, jitterRate=-0.0610751211643219}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-09T11:20:47,577 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:47,577 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 4dc5ca2e3dd0f6286d8d8a4977d489a3: Running coprocessor pre-open hook at 1733743247549Writing region info on filesystem at 1733743247549Initializing all the Stores at 1733743247550 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743247550Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743247551 (+1 ms)Cleaning up temporary data from old regions at 1733743247573 (+22 ms)Running coprocessor post-open hooks at 1733743247577 (+4 ms)Region opened successfully at 1733743247577 2024-12-09T11:20:47,579 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post 
open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., pid=12, masterSystemTime=1733743247544 2024-12-09T11:20:47,581 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:47,581 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:47,582 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=4dc5ca2e3dd0f6286d8d8a4977d489a3, regionState=OPEN, openSeqNum=18, regionLocation=2dff3a36d44f,39663,1733743228795 2024-12-09T11:20:47,585 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4dc5ca2e3dd0f6286d8d8a4977d489a3, server=2dff3a36d44f,39663,1733743228795 because future has completed 2024-12-09T11:20:47,591 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-12-09T11:20:47,591 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 4dc5ca2e3dd0f6286d8d8a4977d489a3, server=2dff3a36d44f,39663,1733743228795 in 196 msec 2024-12-09T11:20:47,594 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, REOPEN/MOVE in 586 msec 2024-12-09T11:20:47,613 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T11:20:47,628 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34604, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T11:20:47,633 ERROR [Time-limited test {}] regionserver.HRegionServer(2442): ***** ABORTING region server 2dff3a36d44f,39663,1733743228795: testing ***** 2024-12-09T11:20:47,633 ERROR [Time-limited test {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-12-09T11:20:47,637 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-12-09T11:20:47,638 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-12-09T11:20:47,645 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-12-09T11:20:47,647 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-12-09T11:20:47,668 INFO [Time-limited test {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 332413496 }, "NonHeapMemoryUsage": { "committed": 171704320, 
"init": 7667712, "max": -1, "used": 169119288 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=IPC", "modelerType": "RegionServer,sub=IPC", "tag.Context": "regionserver", "tag.Hostname": "2dff3a36d44f", "queueSize": 0, "numCallsInGeneralQueue": 0, "numCallsInReplicationQueue": 0, "numCallsInBulkLoadQueue": 0, "numCallsInPriorityQueue": 0, "numCallsInMetaPriorityQueue": 0, "numOpenConnections": 3, "numActiveHandler": 0, "numActiveGeneralHandler": 0, "numActivePriorityHandler": 0, "numActiveReplicationHandler": 0, "numGeneralCallsDropped": 0, "numLifoModeSwitches": 0, "numCallsInWriteQueue": 0, "numActiveBulkLoadHandler": 0, "numCallsInReadQueue": 0, "numCallsInScanQueue": 0, "numActiveWriteHandler": 0, "numActiveReadHandler": 0, "numActiveScanHandler": 0, "nettyDirectMemoryUsage": 67108864, "nettyTotalPendingOutboundBytes": 0, "nettyMaxPendingOutboundBytes": 0, "receivedBytes": 10271, "exceptions.RegionMovedException": 1, "authenticationSuccesses": 0, "authorizationFailures": 0, "exceptions.requestTooBig": 0, "UnwritableTime_num_ops": 0, "UnwritableTime_min": 0, "UnwritableTime_max": 0, "UnwritableTime_mean": 0, "UnwritableTime_25th_percentile": 0, "UnwritableTime_median": 0, "UnwritableTime_75th_percentile": 0, "UnwritableTime_90th_percentile": 0, "UnwritableTime_95th_percentile": 0, "UnwritableTime_98th_percentile": 0, "UnwritableTime_99th_percentile": 0, "UnwritableTime_99.9th_percentile": 0, "exceptions.OutOfOrderScannerNextException": 0, "exceptions.rpcThrottling": 0, "exceptions.otherExceptions": 0, "ProcessCallTime_num_ops": 39, "ProcessCallTime_min": 0, "ProcessCallTime_max": 74, "ProcessCallTime_mean": 5, "ProcessCallTime_25th_percentile": 1, "ProcessCallTime_median": 2, "ProcessCallTime_75th_percentile": 4, "ProcessCallTime_90th_percentile": 5, "ProcessCallTime_95th_percentile": 12, "ProcessCallTime_98th_percentile": 74, "ProcessCallTime_99th_percentile": 74, "ProcessCallTime_99.9th_percentile": 74, "ProcessCallTime_TimeRangeCount_0-1": 10, "ProcessCallTime_TimeRangeCount_1-3": 12, "ProcessCallTime_TimeRangeCount_3-10": 7, "ProcessCallTime_TimeRangeCount_10-30": 1, "ProcessCallTime_TimeRangeCount_30-100": 1, "exceptions.callQueueTooBig": 0, "QueueCallTime_num_ops": 39, "QueueCallTime_min": 0, "QueueCallTime_max": 2, "QueueCallTime_mean": 0, "QueueCallTime_25th_percentile": 0, "QueueCallTime_median": 0, "QueueCallTime_75th_percentile": 0, "QueueCallTime_90th_percentile": 0, "QueueCallTime_95th_percentile": 1, "QueueCallTime_98th_percentile": 2, "QueueCallTime_99th_percentile": 2, "QueueCallTime_99.9th_percentile": 2, "QueueCallTime_TimeRangeCount_0-1": 30, "QueueCallTime_TimeRangeCount_1-3": 1, "authenticationFailures": 0, "exceptions.multiResponseTooLarge": 0, "exceptions.callDropped": 0, "TotalCallTime_num_ops": 39, "TotalCallTime_min": 1, "TotalCallTime_max": 74, "TotalCallTime_mean": 5, "TotalCallTime_25th_percentile": 1, "TotalCallTime_median": 2, "TotalCallTime_75th_percentile": 4, "TotalCallTime_90th_percentile": 5, "TotalCallTime_95th_percentile": 13, "TotalCallTime_98th_percentile": 74, "TotalCallTime_99th_percentile": 74, "TotalCallTime_99.9th_percentile": 74, "TotalCallTime_TimeRangeCount_0-1": 9, "TotalCallTime_TimeRangeCount_1-3": 13, "TotalCallTime_TimeRangeCount_3-10": 7, "TotalCallTime_TimeRangeCount_10-30": 1, "TotalCallTime_TimeRangeCount_30-100": 1, "exceptions.RegionTooBusyException": 0, "exceptions.FailedSanityCheckException": 0, "ResponseSize_num_ops": 39, "ResponseSize_min": 0, 
"ResponseSize_max": 1139, "ResponseSize_mean": 160, "ResponseSize_25th_percentile": 2, "ResponseSize_median": 2, "ResponseSize_75th_percentile": 74, "ResponseSize_90th_percentile": 453, "ResponseSize_95th_percentile": 796, "ResponseSize_98th_percentile": 1001, "ResponseSize_99th_percentile": 1070, "ResponseSize_99.9th_percentile": 1132, "ResponseSize_SizeRangeCount_0-10": 22, "ResponseSize_SizeRangeCount_10-100": 4, "ResponseSize_SizeRangeCount_100-1000": 5, "exceptions.UnknownScannerException": 0, "exceptions": 1, "maxOutboundBytesExceeded": 0, "authenticationFallbacks": 0, "exceptions.quotaExceeded": 0, "exceptions.callTimedOut": 0, "exceptions.NotServingRegionException": 0, "authorizationSuccesses": 0, "exceptions.ScannerResetException": 0, "RequestSize_num_ops": 39, "RequestSize_min": 31, "RequestSize_max": 390, "RequestSize_mean": 204, "RequestSize_25th_percentile": 119, "RequestSize_median": 179, "RequestSize_75th_percentile": 330, "RequestSize_90th_percentile": 330, "RequestSize_95th_percentile": 365, "RequestSize_98th_percentile": 380, "RequestSize_99th_percentile": 385, "RequestSize_99.9th_percentile": 389, "RequestSize_SizeRangeCount_0-10": 2, "RequestSize_SizeRangeCount_100-1000": 29, "sentBytes": 5781 } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Replication", "modelerType": "RegionServer,sub=Replication", "tag.Context": "regionserver", "tag.Hostname": "2dff3a36d44f", "source.shippedHFiles": 0, "Source.ageOfLastShippedOp_num_ops": 0, "Source.ageOfLastShippedOp_min": 0, "Source.ageOfLastShippedOp_max": 0, "Source.ageOfLastShippedOp_mean": 0, "Source.ageOfLastShippedOp_25th_percentile": 0, "Source.ageOfLastShippedOp_median": 0, "Source.ageOfLastShippedOp_75th_percentile": 0, "Source.ageOfLastShippedOp_90th_percentile": 0, "Source.ageOfLastShippedOp_95th_percentile": 0, "Source.ageOfLastShippedOp_98th_percentile": 0, "Source.ageOfLastShippedOp_99th_percentile": 0, "Source.ageOfLastShippedOp_99.9th_percentile": 0, "source.uncleanlyClosedLogs": 0, "source.closedLogsWithUnknownFileLength": 0, "source.walReaderEditsBufferUsage": 0, "source.repeatedLogFileBytes": 0, "source.sizeOfHFileRefsQueue": 0, "source.logReadInBytes": 0, "source.completedRecoverQueues": 0, "source.sizeOfLogQueue": 0, "source.restartedLogReading": 0, "source.failedRecoverQueues": 0, "source.ignoredUncleanlyClosedLogContentsInBytes": 0, "Sink.ageOfLastAppliedOp_num_ops": 0, "Sink.ageOfLastAppliedOp_min": 0, "Sink.ageOfLastAppliedOp_max": 0, "Sink.ageOfLastAppliedOp_mean": 0, "Sink.ageOfLastAppliedOp_25th_percentile": 0, "Sink.ageOfLastAppliedOp_median": 0, "Sink.ageOfLastAppliedOp_75th_percentile": 0, "Sink.ageOfLastAppliedOp_90th_percentile": 0, "Sink.ageOfLastAppliedOp_95th_percentile": 0, "Sink.ageOfLastAppliedOp_98th_percentile": 0, "Sink.ageOfLastAppliedOp_99th_percentile": 0, "Sink.ageOfLastAppliedOp_99.9th_percentile": 0, "source.logEditsRead": 0, "source.numInitializing": 0, "source.shippedOps": 0, "sink.appliedHFiles": 0, "source.logEditsFiltered": 0, "source.shippedBytes": 0, "sink.appliedOps": 0, "source.completedLogs": 0, "source.failedBatches": 0, "sink.failedBatches": 0, "source.shippedBatches": 0, "sink.appliedBatches": 0 } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Server", "modelerType": "RegionServer,sub=Server", "tag.zookeeperQuorum": "127.0.0.1:56083", "tag.serverName": "2dff3a36d44f,46367,1733743228871", "tag.clusterId": "ed218c8a-0bff-4a39-941d-68ae72b73aae", "tag.Context": "regionserver", "tag.Hostname": "2dff3a36d44f", "regionCount": 0, 
"storeCount": 0, "hlogFileCount": 3, "hlogFileSize": 0, "storeFileCount": 0, "maxStoreFileCount": 0, "memStoreSize": 0, "memStoreHeapSize": 0, "memStoreOffHeapSize": 0, "storeFileSize": 0, "storeFileSizeGrowthRate": 0.0, "maxStoreFileAge": 0, "minStoreFileAge": 0, "avgStoreFileAge": 0, "numReferenceFiles": 0, "regionServerStartTime": 1733743228871, "averageRegionSize": 0, "storeFileIndexSize": 0, "staticIndexSize": 0, "staticBloomSize": 0, "bloomFilterRequestsCount": 0, "bloomFilterNegativeResultsCount": 0, "bloomFilterEligibleRequestsCount": 0, "mutationsWithoutWALCount": 0, "mutationsWithoutWALSize": 0, "percentFilesLocal": 0.0, "percentFilesLocalSecondaryRegions": 0.0, "totalBytesRead": 320685, "localBytesRead": 320685, "shortCircuitBytesRead": 0, "zeroCopyBytesRead": 0, "splitQueueLength": 0, "compactionQueueLength": 0, "smallCompactionQueueLength": 0, "largeCompactionQueueLength": 0, "flushQueueLength": 0, "blockCacheFreeSize": 922070024, "blockCacheCount": 0, "blockCacheDataBlockCount": 0, "blockCacheSize": 676856, "blockCacheCountHitPercent": 0.0, "blockCacheExpressHitPercent": 0.0, "l1CacheSize": 676856, "l1CacheFreeSize": 922070024, "l1CacheCount": 0, "l1CacheEvictionCount": 0, "l1CacheHitCount": 0, "l1CacheMissCount": 0, "l1CacheHitRatio": 0.0, "l1CacheMissRatio": 0.0, "l2CacheSize": 0, "l2CacheFreeSize": 0, "l2CacheCount": 0, "l2CacheEvictionCount": 0, "l2CacheHitCount": 0, "l2CacheMissCount": 0, "l2CacheHitRatio": 0.0, "l2CacheMissRatio": 0.0, "mobFileCacheCount": 0, "mobFileCacheHitPercent": 0.0, "readRequestRatePerSecond": 0.0, "writeRequestRatePerSecond": 0.0, "ByteBuffAllocatorHeapAllocationBytes": 227243, "ByteBuffAllocatorPoolAllocationBytes": 0, "ByteBuffAllocatorHeapAllocationRatio": 1.0, "ByteBuffAllocatorTotalBufferCount": 186, "ByteBuffAllocatorUsedBufferCount": 0, "activeScanners": 0, "totalRequestCount": 0, "totalRowActionRequestCount": 0, "readRequestCount": 0, "cpRequestCount": 0, "filteredReadRequestCount": 0, "writeRequestCount": 0, "rpcGetRequestCount": 0, "rpcFullScanRequestCount": 0, "rpcScanRequestCount": 0, "rpcMultiRequestCount": 0, "rpcMutateRequestCount": 0, "checkMutateFailedCount": 0, "checkMutatePassedCount": 0, "blockCacheHitCount": 0, "blockCacheHitCountPrimary": 0, "blockCacheHitCachingCount": 0, "blockCacheMissCount": 0, "blockCacheMissCountPrimary": 0, "blockCacheMissCachingCount": 0, "blockCacheEvictionCount": 0, "blockCacheEvictionCountPrimary": 0, "blockCacheFailedInsertionCount": 0, "blockCacheDataMissCount": 0, "blockCacheLeafIndexMissCount": 0, "blockCacheBloomChunkMissCount": 0, "blockCacheMetaMissCount": 0, "blockCacheRootIndexMissCount": 0, "blockCacheIntermediateIndexMissCount": 0, "blockCacheFileInfoMissCount": 0, "blockCacheGeneralBloomMetaMissCount": 0, "blockCacheDeleteFamilyBloomMissCount": 0, "blockCacheTrailerMissCount": 0, "blockCacheDataHitCount": 0, "blockCacheLeafIndexHitCount": 0, "blockCacheBloomChunkHitCount": 0, "blockCacheMetaHitCount": 0, "blockCacheRootIndexHitCount": 0, "blockCacheIntermediateIndexHitCount": 0, "blockCacheFileInfoHitCount": 0, "blockCacheGeneralBloomMetaHitCount": 0, "blockCacheDeleteFamilyBloomHitCount": 0, "blockCacheTrailerHitCount": 0, "updatesBlockedTime": 0, "flushedCellsCount": 0, "compactedCellsCount": 0, "majorCompactedCellsCount": 0, "flushedCellsSize": 0, "compactedCellsSize": 0, "majorCompactedCellsSize": 0, "cellsCountCompactedFromMob": 0, "cellsCountCompactedToMob": 0, "cellsSizeCompactedFromMob": 0, "cellsSizeCompactedToMob": 0, "mobFlushCount": 0, "mobFlushedCellsCount": 0, 
"mobFlushedCellsSize": 0, "mobScanCellsCount": 0, "mobScanCellsSize": 0, "mobFileCacheAccessCount": 0, "mobFileCacheMissCount": 0, "mobFileCacheEvictedCount": 0, "hedgedReads": 0, "hedgedReadWins": 0, "hedgedReadOpsInCurThread": 0, "blockedRequestCount": 0, "CheckAndMutate_num_ops": 0, "CheckAndMutate_min": 0, "CheckAndMutate_max": 0, "CheckAndMutate_mean": 0, "CheckAndMutate_25th_percentile": 0, "CheckAndMutate_median": 0, "CheckAndMutate_75th_percentile": 0, "CheckAndMutate_90th_percentile": 0, "CheckAndMutate_95th_percentile": 0, "CheckAndMutate_98th_percentile": 0, "CheckAndMutate_99th_percentile": 0, "CheckAndMutate_99.9th_percentile": 0, "MajorCompactionTime_num_ops": 0, "MajorCompactionTime_min": 0, "MajorCompactionTime_max": 0, "MajorCompactionTime_mean": 0, "MajorCompactionTime_25th_percentile": 0, "MajorCompactionTime_median": 0, "MajorCompactionTime_75th_percentile": 0, "MajorCompactionTime_90th_percentile": 0, "MajorCompactionTime_95th_percentile": 0, "MajorCompactionTime_98th_percentile": 0, "MajorCompactionTime_99th_percentile": 0, "MajorCompactionTime_99.9th_percentile": 0, "ScanTime_num_ops": 0, "ScanTime_min": 0, "ScanTime_max": 0, "ScanTime_mean": 0, "ScanTime_25th_percentile": 0, "ScanTime_median": 0, "ScanTime_75th_percentile": 0, "ScanTime_90th_percentile": 0, "ScanTime_95th_percentile": 0, "ScanTime_98th_percentile": 0, "ScanTime_99th_percentile": 0, "ScanTime_99.9th_percentile": 0, "CheckAndMutateBlockBytesScanned_num_ops": 0, "CheckAndMutateBlockBytesScanned_min": 0, "CheckAndMutateBlockBytesScanned_max": 0, "CheckAndMutateBlockBytesScanned_mean": 0, "CheckAndMutateBlockBytesScanned_25th_percentile": 0, "CheckAndMutateBlockBytesScanned_median": 0, "CheckAndMutateBlockBytesScanned_75th_percentile": 0, "CheckAndMutateBlockBytesScanned_90th_percentile": 0, "CheckAndMutateBlockBytesScanned_95th_percentile": 0, "CheckAndMutateBlockBytesScanned_98th_percentile": 0, "CheckAndMutateBlockBytesScanned_99th_percentile": 0, "CheckAndMutateBlockBytesScanned_99.9th_percentile": 0, "Put_num_ops": 0, "Put_min": 0, "Put_max": 0, "Put_mean": 0, "Put_25th_percentile": 0, "Put_median": 0, "Put_75th_percentile": 0, "Put_90th_percentile": 0, "Put_95th_percentile": 0, "Put_98th_percentile": 0, "Put_99th_percentile": 0, "Put_99.9th_percentile": 0, "splitRequestCount": 0, "AppendBlockBytesScanned_num_ops": 0, "AppendBlockBytesScanned_min": 0, "AppendBlockBytesScanned_max": 0, "AppendBlockBytesScanned_mean": 0, "AppendBlockBytesScanned_25th_percentile": 0, "AppendBlockBytesScanned_median": 0, "AppendBlockBytesScanned_75th_percentile": 0, "AppendBlockBytesScanned_90th_percentile": 0, "AppendBlockBytesScanned_95th_percentile": 0, "AppendBlockBytesScanned_98th_percentile": 0, "AppendBlockBytesScanned_99th_percentile": 0, "AppendBlockBytesScanned_99.9th_percentile": 0, "PutBatch_num_ops": 0, "PutBatch_min": 0, "PutBatch_max": 0, "PutBatch_mean": 0, "PutBatch_25th_percentile": 0, "PutBatch_median": 0, "PutBatch_75th_percentile": 0, "PutBatch_90th_percentile": 0, "PutBatch_95th_percentile": 0, "PutBatch_98th_percentile": 0, "PutBatch_99th_percentile": 0, "PutBatch_99.9th_percentile": 0, "IncrementBlockBytesScanned_num_ops": 0, "IncrementBlockBytesScanned_min": 0, "IncrementBlockBytesScanned_max": 0, "IncrementBlockBytesScanned_mean": 0, "IncrementBlockBytesScanned_25th_percentile": 0, "IncrementBlockBytesScanned_median": 0, "IncrementBlockBytesScanned_75th_percentile": 0, "IncrementBlockBytesScanned_90th_percentile": 0, "IncrementBlockBytesScanned_95th_percentile": 0, 
"IncrementBlockBytesScanned_98th_percentile": 0, "IncrementBlockBytesScanned_99th_percentile": 0, "IncrementBlockBytesScanned_99.9th_percentile": 0, "SplitTime_num_ops": 0, "SplitTime_min": 0, "SplitTime_max": 0, "SplitTime_mean": 0, "SplitTime_25th_percentile": 0, "SplitTime_median": 0, "SplitTime_75th_percentile": 0, "SplitTime_90th_percentile": 0, "SplitTime_95th_percentile": 0, "SplitTime_98th_percentile": 0, "SplitTime_99th_percentile": 0, "SplitTime_99.9th_percentile": 0, "GetBlockBytesScanned_num_ops": 0, "GetBlockBytesScanned_min": 0, "GetBlockBytesScanned_max": 0, "GetBlockBytesScanned_mean": 0, "GetBlockBytesScanned_25th_percentile": 0, "GetBlockBytesScanned_median": 0, "GetBlockBytesScanned_75th_percentile": 0, "GetBlockBytesScanned_90th_percentile": 0, "GetBlockBytesScanned_95th_percentile": 0, "GetBlockBytesScanned_98th_percentile": 0, "GetBlockBytesScanned_99th_percentile": 0, "GetBlockBytesScanned_99.9th_percentile": 0, "majorCompactedInputBytes": 0, "slowAppendCount": 0, "flushedOutputBytes": 0, "Replay_num_ops": 0, "Replay_min": 0, "Replay_max": 0, "Replay_mean": 0, "Replay_25th_percentile": 0, "Replay_median": 0, "Replay_75th_percentile": 0, "Replay_90th_percentile": 0, "Replay_95th_percentile": 0, "Replay_98th_percentile": 0, "Replay_99th_percentile": 0, "Replay_99.9th_percentile": 0, "MajorCompactionInputSize_num_ops": 0, "MajorCompactionInputSize_min": 0, "MajorCompactionInputSize_max": 0, "MajorCompactionInputSize_mean": 0, "MajorCompactionInputSize_25th_percentile": 0, "MajorCompactionInputSize_median": 0, "MajorCompactionInputSize_75th_percentile": 0, "MajorCompactionInputSize_90th_percentile": 0, "MajorCompactionInputSize_95th_percentile": 0, "MajorCompactionInputSize_98th_percentile": 0, "MajorCompactionInputSize_99th_percentile": 0, "MajorCompactionInputSize_99.9th_percentile": 0, "pauseInfoThresholdExceeded": 0, "CheckAndDelete_num_ops": 0, "CheckAndDelete_min": 0, "CheckAndDelete_max": 0, "CheckAndDelete_mean": 0, "CheckAndDelete_25th_percentile": 0, "CheckAndDelete_median": 0, "CheckAndDelete_75th_percentile": 0, "CheckAndDelete_90th_percentile": 0, "CheckAndDelete_95th_percentile": 0, "CheckAndDelete_98th_percentile": 0, "CheckAndDelete_99th_percentile": 0, "CheckAndDelete_99.9th_percentile": 0, "CompactionInputSize_num_ops": 0, "CompactionInputSize_min": 0, "CompactionInputSize_max": 0, "CompactionInputSize_mean": 0, "CompactionInputSize_25th_percentile": 0, "CompactionInputSize_median": 0, "CompactionInputSize_75th_percentile": 0, "CompactionInputSize_90th_percentile": 0, "CompactionInputSize_95th_percentile": 0, "CompactionInputSize_98th_percentile": 0, "CompactionInputSize_99th_percentile": 0, "CompactionInputSize_99.9th_percentile": 0, "flushedMemstoreBytes": 0, "majorCompactedOutputBytes": 0, "slowPutCount": 0, "compactedInputBytes": 0, "FlushOutputSize_num_ops": 0, "FlushOutputSize_min": 0, "FlushOutputSize_max": 0, "FlushOutputSize_mean": 0, "FlushOutputSize_25th_percentile": 0, "FlushOutputSize_median": 0, "FlushOutputSize_75th_percentile": 0, "FlushOutputSize_90th_percentile": 0, "FlushOutputSize_95th_percentile": 0, "FlushOutputSize_98th_percentile": 0, "FlushOutputSize_99th_percentile": 0, "FlushOutputSize_99.9th_percentile": 0, "PauseTimeWithGc_num_ops": 0, "PauseTimeWithGc_min": 0, "PauseTimeWithGc_max": 0, "PauseTimeWithGc_mean": 0, "PauseTimeWithGc_25th_percentile": 0, "PauseTimeWithGc_median": 0, "PauseTimeWithGc_75th_percentile": 0, "PauseTimeWithGc_90th_percentile": 0, "PauseTimeWithGc_95th_percentile": 0, "PauseTimeWithGc_98th_percentile": 
0, "PauseTimeWithGc_99th_percentile": 0, "PauseTimeWithGc_99.9th_percentile": 0, "compactedOutputBytes": 0, "pauseWarnThresholdExceeded": 0, "ScanBlockBytesScanned_num_ops": 0, "ScanBlockBytesScanned_min": 0, "ScanBlockBytesScanned_max": 0, "ScanBlockBytesScanned_mean": 0, "ScanBlockBytesScanned_25th_percentile": 0, "ScanBlockBytesScanned_median": 0, "ScanBlockBytesScanned_75th_percentile": 0, "ScanBlockBytesScanned_90th_percentile": 0, "ScanBlockBytesScanned_95th_percentile": 0, "ScanBlockBytesScanned_98th_percentile": 0, "ScanBlockBytesScanned_99th_percentile": 0, "ScanBlockBytesScanned_99.9th_percentile": 0, "Increment_num_ops": 0, "Increment_min": 0, "Increment_max": 0, "Increment_mean": 0, "Increment_25th_percentile": 0, "Increment_median": 0, "Increment_75th_percentile": 0, "Increment_90th_percentile": 0, "Increment_95th_percentile": 0, "Increment_98th_percentile": 0, "Increment_99th_percentile": 0, "Increment_99.9th_percentile": 0, "Delete_num_ops": 0, "Delete_min": 0, "Delete_max": 0, "Delete_mean": 0, "Delete_25th_percentile": 0, "Delete_median": 0, "Delete_75th_percentile": 0, "Delete_90th_percentile": 0, "Delete_95th_percentile": 0, "Delete_98th_percentile": 0, "Delete_99th_percentile": 0, "Delete_99.9th_percentile": 0, "DeleteBatch_num_ops": 0, "DeleteBatch_min": 0, "DeleteBatch_max": 0, "DeleteBatch_mean": 0, "DeleteBatch_25th_percentile": 0, "DeleteBatch_median": 0, "DeleteBatch_75th_percentile": 0, "DeleteBatch_90th_percentile": 0, "DeleteBatch_95th_percentile": 0, "DeleteBatch_98th_percentile": 0, "DeleteBatch_99th_percentile": 0, "DeleteBatch_99.9th_percentile": 0, "blockBytesScannedCount": 0, "FlushMemstoreSize_num_ops": 0, "FlushMemstoreSize_min": 0, "FlushMemstoreSize_max": 0, "FlushMemstoreSize_mean": 0, "FlushMemstoreSize_25th_percentile": 0, "FlushMemstoreSize_median": 0, "FlushMemstoreSize_75th_percentile": 0, "FlushMemstoreSize_90th_percentile": 0, "FlushMemstoreSize_95th_percentile": 0, "FlushMemstoreSize_98th_percentile": 0, "FlushMemstoreSize_99th_percentile": 0, "FlushMemstoreSize_99.9th_percentile": 0, "CompactionInputFileCount_num_ops": 0, "CompactionInputFileCount_min": 0, "CompactionInputFileCount_max": 0, "CompactionInputFileCount_mean": 0, "CompactionInputFileCount_25th_percentile": 0, "CompactionInputFileCount_median": 0, "CompactionInputFileCount_75th_percentile": 0, "CompactionInputFileCount_90th_percentile": 0, "CompactionInputFileCount_95th_percentile": 0, "CompactionInputFileCount_98th_percentile": 0, "CompactionInputFileCount_99th_percentile": 0, "CompactionInputFileCount_99.9th_percentile": 0, "CompactionTime_num_ops": 0, "CompactionTime_min": 0, "CompactionTime_max": 0, "CompactionTime_mean": 0, "CompactionTime_25th_percentile": 0, "CompactionTime_median": 0, "CompactionTime_75th_percentile": 0, "CompactionTime_90th_percentile": 0, "CompactionTime_95th_percentile": 0, "CompactionTime_98th_percentile": 0, "CompactionTime_99th_percentile": 0, "CompactionTime_99.9th_percentile": 0, "Get_num_ops": 0, "Get_min": 0, "Get_max": 0, "Get_mean": 0, "Get_25th_percentile": 0, "Get_median": 0, "Get_75th_percentile": 0, "Get_90th_percentile": 0, "Get_95th_percentile": 0, "Get_98th_percentile": 0, "Get_99th_percentile": 0, "Get_99.9th_percentile": 0, "MajorCompactionInputFileCount_num_ops": 0, "MajorCompactionInputFileCount_min": 0, "MajorCompactionInputFileCount_max": 0, "MajorCompactionInputFileCount_mean": 0, "MajorCompactionInputFileCount_25th_percentile": 0, "MajorCompactionInputFileCount_median": 0, "MajorCompactionInputFileCount_75th_percentile": 0, 
"MajorCompactionInputFileCount_90th_percentile": 0, "MajorCompactionInputFileCount_95th_percentile": 0, "MajorCompactionInputFileCount_98th_percentile": 0, "MajorCompactionInputFileCount_99th_percentile": 0, "MajorCompactionInputFileCount_99.9th_percentile": 0, "scannerLeaseExpiredCount": 0, "CheckAndPut_num_ops": 0, "CheckAndPut_min": 0, "CheckAndPut_max": 0, "CheckAndPut_mean": 0, "CheckAndPut_25th_percentile": 0, "CheckAndPut_median": 0, "CheckAndPut_75th_percentile": 0, "CheckAndPut_90th_percentile": 0, "CheckAndPut_95th_percentile": 0, "CheckAndPut_98th_percentile": 0, "CheckAndPut_99th_percentile": 0, "CheckAndPut_99.9th_percentile": 0, "MajorCompactionOutputSize_num_ops": 0, "MajorCompactionOutputSize_min": 0, "MajorCompactionOutputSize_max": 0, "MajorCompactionOutputSize_mean": 0, "MajorCompactionOutputSize_25th_percentile": 0, "MajorCompactionOutputSize_median": 0, "MajorCompactionOutputSize_75th_percentile": 0, "MajorCompactionOutputSize_90th_percentile": 0, "MajorCompactionOutputSize_95th_percentile": 0, "MajorCompactionOutputSize_98th_percentile": 0, "MajorCompactionOutputSize_99th_percentile": 0, "MajorCompactionOutputSize_99.9th_percentile": 0, "CompactionOutputFileCount_num_ops": 0, "CompactionOutputFileCount_min": 0, "CompactionOutputFileCount_max": 0, "CompactionOutputFileCount_mean": 0, "CompactionOutputFileCount_25th_percentile": 0, "CompactionOutputFileCount_median": 0, "CompactionOutputFileCount_75th_percentile": 0, "CompactionOutputFileCount_90th_percentile": 0, "CompactionOutputFileCount_95th_percentile": 0, "CompactionOutputFileCount_98th_percentile": 0, "CompactionOutputFileCount_99th_percentile": 0, "CompactionOutputFileCount_99.9th_percentile": 0, "slowDeleteCount": 0, "FlushTime_num_ops": 0, "FlushTime_min": 0, "FlushTime_max": 0, "FlushTime_mean": 0, "FlushTime_25th_percentile": 0, "FlushTime_median": 0, "FlushTime_75th_percentile": 0, "FlushTime_90th_percentile": 0, "FlushTime_95th_percentile": 0, "FlushTime_98th_percentile": 0, "FlushTime_99th_percentile": 0, "FlushTime_99.9th_percentile": 0, "splitSuccessCount": 0, "MajorCompactionOutputFileCount_num_ops": 0, "MajorCompactionOutputFileCount_min": 0, "MajorCompactionOutputFileCount_max": 0, "MajorCompactionOutputFileCount_mean": 0, "MajorCompactionOutputFileCount_25th_percentile": 0, "MajorCompactionOutputFileCount_median": 0, "MajorCompactionOutputFileCount_75th_percentile": 0, "MajorCompactionOutputFileCount_90th_percentile": 0, "MajorCompactionOutputFileCount_95th_percentile": 0, "MajorCompactionOutputFileCount_98th_percentile": 0, "MajorCompactionOutputFileCount_99th_percentile": 0, "MajorCompactionOutputFileCount_99.9th_percentile": 0, "slowGetCount": 0, "ScanSize_num_ops": 0, "ScanSize_min": 0, "ScanSize_max": 0, "ScanSize_mean": 0, "ScanSize_25th_percentile": 0, "ScanSize_median": 0, "ScanSize_75th_percentile": 0, "ScanSize_90th_percentile": 0, "ScanSize_95th_percentile": 0, "ScanSize_98th_percentile": 0, "ScanSize_99th_percentile": 0, "ScanSize_99.9th_percentile": 0, "CompactionOutputSize_num_ops": 0, "CompactionOutputSize_min": 0, "CompactionOutputSize_max": 0, "CompactionOutputSize_mean": 0, "CompactionOutputSize_25th_percentile": 0, "CompactionOutputSize_median": 0, "CompactionOutputSize_75th_percentile": 0, "CompactionOutputSize_90th_percentile": 0, "CompactionOutputSize_95th_percentile": 0, "CompactionOutputSize_98th_percentile": 0, "CompactionOutputSize_99th_percentile": 0, "CompactionOutputSize_99.9th_percentile": 0, "PauseTimeWithoutGc_num_ops": 0, "PauseTimeWithoutGc_min": 0, 
"PauseTimeWithoutGc_max": 0, "PauseTimeWithoutGc_mean": 0, "PauseTimeWithoutGc_25th_percentile": 0, "PauseTimeWithoutGc_median": 0, "PauseTimeWithoutGc_75th_percentile": 0, "PauseTimeWithoutGc_90th_percentile": 0, "PauseTimeWithoutGc_95th_percentile": 0, "PauseTimeWithoutGc_98th_percentile": 0, "PauseTimeWithoutGc_99th_percentile": 0, "PauseTimeWithoutGc_99.9th_percentile": 0, "slowIncrementCount": 0, "Append_num_ops": 0, "Append_min": 0, "Append_max": 0, "Append_mean": 0, "Append_25th_percentile": 0, "Append_median": 0, "Append_75th_percentile": 0, "Append_90th_percentile": 0, "Append_95th_percentile": 0, "Append_98th_percentile": 0, "Append_99th_percentile": 0, "Append_99.9th_percentile": 0, "Bulkload_count": 0, "Bulkload_mean_rate": 0.0, "Bulkload_1min_rate": 0.0, "Bulkload_5min_rate": 0.0, "Bulkload_15min_rate": 0.0, "Bulkload_num_ops": 0, "Bulkload_min": 0, "Bulkload_max": 0, "Bulkload_mean": 0, "Bulkload_25th_percentile": 0, "Bulkload_median": 0, "Bulkload_75th_percentile": 0, "Bulkload_90th_percentile": 0, "Bulkload_95th_percentile": 0, "Bulkload_98th_percentile": 0, "Bulkload_99th_percentile": 0, "Bulkload_99.9th_percentile": 0 } ] } 2024-12-09T11:20:47,672 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42781 {}] master.MasterRpcServices(700): 2dff3a36d44f,39663,1733743228795 reported a fatal error: ***** ABORTING region server 2dff3a36d44f,39663,1733743228795: testing ***** 2024-12-09T11:20:47,677 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2dff3a36d44f,39663,1733743228795' ***** 2024-12-09T11:20:47,677 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: testing 2024-12-09T11:20:47,678 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T11:20:47,679 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T11:20:47,680 INFO [RS:1;2dff3a36d44f:39663 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager abruptly. 2024-12-09T11:20:47,680 INFO [RS:1;2dff3a36d44f:39663 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager abruptly. 2024-12-09T11:20:47,680 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.HRegionServer(3091): Received CLOSE for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:47,680 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.HRegionServer(956): aborting server 2dff3a36d44f,39663,1733743228795 2024-12-09T11:20:47,680 INFO [RS:1;2dff3a36d44f:39663 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:20:47,681 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4dc5ca2e3dd0f6286d8d8a4977d489a3, disabling compactions & flushes 2024-12-09T11:20:47,681 INFO [RS:1;2dff3a36d44f:39663 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;2dff3a36d44f:39663. 2024-12-09T11:20:47,681 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 
2024-12-09T11:20:47,681 DEBUG [RS:1;2dff3a36d44f:39663 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:20:47,681 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:47,681 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. after waiting 0 ms 2024-12-09T11:20:47,681 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:47,681 DEBUG [RS:1;2dff3a36d44f:39663 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:20:47,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46259 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Get size: 140 connection: 172.17.0.3:53556 deadline: 1733743307678, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2dff3a36d44f port=39663 startCode=1733743228795. As of locationSeqNum=12. 2024-12-09T11:20:47,682 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,46259,1733743228656, seqNum=5 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,46259,1733743228656, seqNum=5, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2dff3a36d44f port=39663 startCode=1733743228795. As of locationSeqNum=12. 
2024-12-09T11:20:47,683 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,46259,1733743228656, seqNum=5 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2dff3a36d44f port=39663 startCode=1733743228795. As of locationSeqNum=12. 2024-12-09T11:20:47,683 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,46259,1733743228656, seqNum=5 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,39663,1733743228795, seqNum=12 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=2dff3a36d44f port=39663 startCode=1733743228795. As of locationSeqNum=12. 2024-12-09T11:20:47,684 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T11:20:47,684 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T11:20:47,684 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T11:20:47,684 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T11:20:47,687 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 2dff3a36d44f,39663,1733743228795 aborting at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processRequest(ServerRpcConnection.java:564) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processOneRpc(ServerRpcConnection.java:364) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyServerRpcConnection.process(NettyServerRpcConnection.java:89) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:56) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:31) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:99) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T11:20:47,689 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1, error=org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 2dff3a36d44f,39663,1733743228795 aborting 2024-12-09T11:20:47,689 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1 is org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 2dff3a36d44f,39663,1733743228795 aborting 2024-12-09T11:20:47,689 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(88): Try removing region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1 from cache 2024-12-09T11:20:47,693 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T11:20:47,693 DEBUG [RS:1;2dff3a36d44f:39663 {}] regionserver.HRegionServer(1325): Online Regions={4dc5ca2e3dd0f6286d8d8a4977d489a3=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., 1588230740=hbase:meta,,1.1588230740} 2024-12-09T11:20:47,693 DEBUG [RS:1;2dff3a36d44f:39663 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:47,693 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T11:20:47,693 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T11:20:47,693 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T11:20:47,693 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T11:20:47,693 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T11:20:47,696 ERROR [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1960): Memstore data size is 5811 in region hbase:meta,,1.1588230740 2024-12-09T11:20:47,696 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:20:47,696 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T11:20:47,696 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733743247693Running coprocessor pre-close hooks at 1733743247693Disabling compacts and flushes for region at 1733743247693Disabling writes for close at 1733743247693Writing region close event to WAL at 1733743247695 (+2 ms)Running coprocessor post-close hooks at 1733743247696 (+1 ms)Closed at 1733743247696 
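The "Region close journal" entry above packs every close step and its epoch-millisecond timestamp into one message, adding a "(+N ms)" delta only where a step took measurable time. The parser below is purely illustrative and is not HBase code; the regex and the sample string are assumptions based only on the journal text above, and it prints each step with the delta attributed to it.

```java
// Illustrative parser for the concatenated "Region close journal" format seen above.
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class CloseJournalParser {
  // "<Step description> at <13-digit epoch millis>" optionally followed by " (+N ms)".
  private static final Pattern STEP =
      Pattern.compile("([A-Z][^:]*?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

  public static void main(String[] args) {
    String journal = "Waiting for close lock at 1733743247693"
        + "Running coprocessor pre-close hooks at 1733743247693"
        + "Disabling compacts and flushes for region at 1733743247693"
        + "Disabling writes for close at 1733743247693"
        + "Writing region close event to WAL at 1733743247695 (+2 ms)"
        + "Running coprocessor post-close hooks at 1733743247696 (+1 ms)"
        + "Closed at 1733743247696";
    Matcher m = STEP.matcher(journal);
    while (m.find()) {
      String delta = m.group(3) == null ? "0" : m.group(3);
      System.out.println(m.group(1).trim() + " -> +" + delta + " ms (at " + m.group(2) + ")");
    }
  }
}
```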
2024-12-09T11:20:47,697 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T11:20:47,699 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:47,699 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4dc5ca2e3dd0f6286d8d8a4977d489a3: Waiting for close lock at 1733743247680Running coprocessor pre-close hooks at 1733743247681 (+1 ms)Disabling compacts and flushes for region at 1733743247681Disabling writes for close at 1733743247681Writing region close event to WAL at 1733743247699 (+18 ms)Running coprocessor post-close hooks at 1733743247699Closed at 1733743247699 2024-12-09T11:20:47,699 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:47,712 INFO [regionserver/2dff3a36d44f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:20:47,791 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:20:47,793 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1] 2024-12-09T11:20:47,794 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 2dff3a36d44f,39663,1733743228795 aborting at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processRequest(ServerRpcConnection.java:564) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processOneRpc(ServerRpcConnection.java:364) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyServerRpcConnection.process(NettyServerRpcConnection.java:89) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:56) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:31) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:99) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:20:47,795 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1, error=org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 2dff3a36d44f,39663,1733743228795 aborting 2024-12-09T11:20:47,795 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1 is org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 2dff3a36d44f,39663,1733743228795 aborting 2024-12-09T11:20:47,795 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(88): Try removing region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1 from cache 2024-12-09T11:20:47,894 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.HRegionServer(976): stopping server 2dff3a36d44f,39663,1733743228795; all regions closed. 
2024-12-09T11:20:47,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741836_1012 (size=3561)
2024-12-09T11:20:47,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741836_1012 (size=3561)
2024-12-09T11:20:47,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741833_1009 (size=1407)
2024-12-09T11:20:47,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741833_1009 (size=1407)
2024-12-09T11:20:47,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741833_1009 (size=1407)
2024-12-09T11:20:47,911 DEBUG [RS:1;2dff3a36d44f:39663 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T11:20:47,911 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T11:20:47,911 INFO [RS:1;2dff3a36d44f:39663 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T11:20:47,911 INFO [RS:1;2dff3a36d44f:39663 {}] hbase.ChoreService(370): Chore service for: regionserver/2dff3a36d44f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-09T11:20:47,912 INFO [RS:1;2dff3a36d44f:39663 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T11:20:47,912 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
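The repeated addStoredBlock lines above are the HDFS namenode recording that each block of the WAL files just closed by the stopping region server is held by three datanodes (127.0.0.1:46359, 44093 and 34459). Below is a hedged sketch of asking the same question from the client side with the Hadoop FileSystem API; the directory name is copied from this log, and it assumes the dead server's WAL directory is inspected before the ServerCrashProcedure further down renames it with a -splitting suffix.

```java
// Illustrative sketch (assumptions noted above): list the dead region server's WAL files
// on the mini-cluster HDFS and print their replication factor and length.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WalReplicationCheck {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40493"), new Configuration());
    Path walDir = new Path("/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/"
        + "WALs/2dff3a36d44f,39663,1733743228795");
    for (FileStatus st : fs.listStatus(walDir)) {
      // Three replicas per block is what the addStoredBlock lines above report.
      System.out.println(st.getPath().getName()
          + " replication=" + st.getReplication() + " len=" + st.getLen());
    }
  }
}
```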
2024-12-09T11:20:47,912 INFO [RS:1;2dff3a36d44f:39663 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:39663
2024-12-09T11:20:47,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39663-0x1012ae9bf670002, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2dff3a36d44f,39663,1733743228795
2024-12-09T11:20:47,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T11:20:47,920 INFO [RS:1;2dff3a36d44f:39663 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T11:20:47,923 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2dff3a36d44f,39663,1733743228795]
2024-12-09T11:20:47,924 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2dff3a36d44f,39663,1733743228795 already deleted, retry=false
2024-12-09T11:20:47,925 INFO [RegionServerTracker-0 {}] master.ServerManager(695): Processing expiration of 2dff3a36d44f,39663,1733743228795 on 2dff3a36d44f,42781,1733743227566
2024-12-09T11:20:47,930 DEBUG [RegionServerTracker-0 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=false; ServerCrashProcedure 2dff3a36d44f,39663,1733743228795, splitWal=true, meta=true
2024-12-09T11:20:47,933 INFO [RegionServerTracker-0 {}] assignment.AssignmentManager(1999): Scheduled ServerCrashProcedure pid=13 for 2dff3a36d44f,39663,1733743228795 (carryingMeta=true) 2dff3a36d44f,39663,1733743228795/CRASHED/regionCount=2/lock=java.util.concurrent.locks.ReentrantReadWriteLock@17b8fc6[Write locks = 1, Read locks = 0], oldState=ONLINE.
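The expiration handling above is driven entirely by ZooKeeper: the stopping server's ephemeral znode under /hbase/rs disappears, the master's RegionServerTracker sees the NodeDeleted and NodeChildrenChanged events, and it schedules ServerCrashProcedure pid=13 with splitWal=true, meta=true because the dead server was carrying hbase:meta. The sketch below watches the same path with the plain ZooKeeper client; it illustrates the mechanism only and is not the tracker's actual code. The quorum address is taken from this log and the base znode is /hbase.

```java
// Illustrative watcher on the /hbase/rs children, the same signal RegionServerTracker acts on.
import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsNodeWatcher {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:56083", 30000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    // One-shot watch: a NodeChildrenChanged event fires when a region server's ephemeral
    // znode appears or disappears, e.g. /hbase/rs/2dff3a36d44f,39663,1733743228795.
    List<String> servers = zk.getChildren("/hbase/rs", event ->
        System.out.println("children changed: " + event.getType() + " " + event.getPath()));
    System.out.println("live region servers: " + servers);
    Thread.sleep(60_000); // keep the session open long enough to observe an event
    zk.close();
  }
}
```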
2024-12-09T11:20:47,933 INFO [PEWorker-5 {}] procedure.ServerCrashProcedure(169): Start pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=true; ServerCrashProcedure 2dff3a36d44f,39663,1733743228795, splitWal=true, meta=true 2024-12-09T11:20:47,935 INFO [PEWorker-5 {}] procedure.ServerCrashProcedure(339): Splitting WALs pid=13, state=RUNNABLE:SERVER_CRASH_SPLIT_META_LOGS, hasLock=true; ServerCrashProcedure 2dff3a36d44f,39663,1733743228795, splitWal=true, meta=true, isMeta: true 2024-12-09T11:20:47,937 DEBUG [PEWorker-5 {}] master.MasterWalManager(329): Renamed region directory: hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting 2024-12-09T11:20:47,938 INFO [PEWorker-5 {}] master.SplitWALManager(105): 2dff3a36d44f,39663,1733743228795 WAL count=1, meta=true 2024-12-09T11:20:47,941 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE:ACQUIRE_SPLIT_WAL_WORKER, hasLock=false; SplitWALProcedure 2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta}] 2024-12-09T11:20:47,949 DEBUG [PEWorker-2 {}] master.SplitWALManager(158): Acquired split WAL worker=2dff3a36d44f,46259,1733743228656 2024-12-09T11:20:47,951 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE, hasLock=false; SplitWALRemoteProcedure 2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta, worker=2dff3a36d44f,46259,1733743228656}] 2024-12-09T11:20:48,000 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:20:48,002 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1] 2024-12-09T11:20:48,004 WARN [RPCClient-NioEventLoopGroup-6-3 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server 2dff3a36d44f:39663 org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: 2dff3a36d44f/172.17.0.3:39663 Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel.doFinishConnect(NioSocketChannel.java:336) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.finishConnect(AbstractNioChannel.java:339) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:776) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:20:48,005 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1, error=java.net.ConnectException: Call to address=2dff3a36d44f:39663 failed on connection exception: org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: 2dff3a36d44f/172.17.0.3:39663 2024-12-09T11:20:48,005 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1 is java.net.ConnectException: Connection refused 2024-12-09T11:20:48,005 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(88): Try removing region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1 from cache 2024-12-09T11:20:48,006 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.FailedServers(52): Added failed server with address 2dff3a36d44f:39663 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: 2dff3a36d44f/172.17.0.3:39663 2024-12-09T11:20:48,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39663-0x1012ae9bf670002, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:20:48,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39663-0x1012ae9bf670002, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:20:48,024 INFO [RS:1;2dff3a36d44f:39663 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:20:48,024 INFO [RS:1;2dff3a36d44f:39663 {}] regionserver.HRegionServer(1031): Exiting; stopping=2dff3a36d44f,39663,1733743228795; zookeeper connection closed. 
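With the region server gone, the ServerCrashProcedure scheduled above (pid=13) first splits the dead server's meta WAL. The lines that follow show WALSplitter reading 2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta, writing the surviving edits into a recovered.edits file under the meta region directory, and renaming it to the highest sequence id it contains (0000000000000000018), which the reopened region later replays and deletes. Below is a small sketch for inspecting that output with the Hadoop FileSystem API; the paths are copied from the log, and the assumption is that the directory is inspected in the window between the split and the replay.

```java
// Illustrative sketch: list the recovered.edits files the WAL split produces for the meta
// region (1588230740). Paths are taken verbatim from the surrounding log lines.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListRecoveredEdits {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40493"), new Configuration());
    Path recoveredEdits = new Path(
        "/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/"
            + "data/hbase/meta/1588230740/recovered.edits");
    // Each finished file is named after the highest sequence id it holds; on open the region
    // replays any file whose max sequence id is above the store's last flushed sequence id.
    for (FileStatus st : fs.listStatus(recoveredEdits)) {
      System.out.println(st.getPath().getName() + " (" + st.getLen() + " bytes)");
    }
  }
}
```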
2024-12-09T11:20:48,025 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@10f61d01 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@10f61d01 2024-12-09T11:20:48,113 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46259 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SplitWALCallable, pid=15 2024-12-09T11:20:48,134 INFO [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(299): Splitting hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta, size=3.5 K (3561bytes) 2024-12-09T11:20:48,134 INFO [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta 2024-12-09T11:20:48,134 INFO [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta after 0ms 2024-12-09T11:20:48,137 DEBUG [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:48,137 INFO [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(310): Open hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta took 4ms 2024-12-09T11:20:48,146 DEBUG [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(352): Last flushed sequenceid for 1588230740: last_flushed_sequence_id: 18446744073709551615 store_sequence_id { family_name: "info" sequence_id: 5 } store_sequence_id { family_name: "ns" sequence_id: 3 } store_sequence_id { family_name: "rep_barrier" sequence_id: 18446744073709551615 } store_sequence_id { family_name: "table" sequence_id: 6 } 2024-12-09T11:20:48,147 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly 2024-12-09T11:20:48,147 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly Metrics about Tables on a single HBase RegionServer 2024-12-09T11:20:48,147 DEBUG [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(406): Finishing writing output for 
hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta so closing down 2024-12-09T11:20:48,147 DEBUG [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-09T11:20:48,148 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum 2024-12-09T11:20:48,148 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum Metrics about Tables on a single HBase RegionServer 2024-12-09T11:20:48,149 INFO [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-0-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000004-2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta.temp 2024-12-09T11:20:48,149 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:20:48,149 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T11:20:48,150 INFO [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-0-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta.temp 2024-12-09T11:20:48,151 INFO [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(145): 3 split writer threads finished 2024-12-09T11:20:48,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741892_1070 (size=3346) 2024-12-09T11:20:48,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741892_1070 (size=3346) 2024-12-09T11:20:48,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741892_1070 (size=3346) 2024-12-09T11:20:48,165 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta.temp (wrote 15 edits, skipped 0 edits in 0 ms) 2024-12-09T11:20:48,170 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta.temp to hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/recovered.edits/0000000000000000018 2024-12-09T11:20:48,170 INFO [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-0 
{event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(425): Processed 16 edits across 1 Regions in 32 ms; skipped=1; WAL=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta, size=3.5 K, length=3561, corrupted=false, cancelled=false 2024-12-09T11:20:48,171 DEBUG [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(428): Completed split of hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta, journal: Splitting hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta, size=3.5 K (3561bytes) at 1733743248134Finishing writing output for hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta so closing down at 1733743248147 (+13 ms)Creating recovered edits writer path=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta.temp at 1733743248150 (+3 ms)3 split writer threads finished at 1733743248151 (+1 ms)Closed recovered edits writer path=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta.temp (wrote 15 edits, skipped 0 edits in 0 ms) at 1733743248165 (+14 ms)Rename recovered edits hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta.temp to hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/recovered.edits/0000000000000000018 at 1733743248170 (+5 ms)Processed 16 edits across 1 Regions in 32 ms; skipped=1; WAL=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta, size=3.5 K, length=3561, corrupted=false, cancelled=false at 1733743248170 2024-12-09T11:20:48,171 DEBUG [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.SplitLogWorker(218): Done splitting WAL hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta 2024-12-09T11:20:48,172 DEBUG [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-09T11:20:48,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42781 {}] master.HMaster(4169): Remote procedure done, pid=15 2024-12-09T11:20:48,179 INFO [PEWorker-3 {}] wal.WALSplitUtil(143): Moved 
hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta to hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/oldWALs 2024-12-09T11:20:48,182 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=15, resume processing ppid=14 2024-12-09T11:20:48,182 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, ppid=14, state=SUCCESS, hasLock=false; SplitWALRemoteProcedure 2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta, worker=2dff3a36d44f,46259,1733743228656 in 228 msec 2024-12-09T11:20:48,184 DEBUG [PEWorker-1 {}] master.SplitWALManager(172): Release split WAL worker=2dff3a36d44f,46259,1733743228656 2024-12-09T11:20:48,188 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-12-09T11:20:48,188 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; SplitWALProcedure 2dff3a36d44f%2C39663%2C1733743228795.meta.1733743230757.meta, worker=2dff3a36d44f,46259,1733743228656 in 244 msec 2024-12-09T11:20:48,190 INFO [PEWorker-5 {}] master.SplitLogManager(171): hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting dir is empty, no logs to split. 2024-12-09T11:20:48,190 INFO [PEWorker-5 {}] master.SplitWALManager(105): 2dff3a36d44f,39663,1733743228795 WAL count=0, meta=true 2024-12-09T11:20:48,190 DEBUG [PEWorker-5 {}] procedure.ServerCrashProcedure(329): Check if 2dff3a36d44f,39663,1733743228795 WAL splitting is done? wals=0, meta=true 2024-12-09T11:20:48,193 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T11:20:48,194 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T11:20:48,196 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OPEN, location=null; forceNewPlan=true, retain=false 2024-12-09T11:20:48,311 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:20:48,313 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1] 2024-12-09T11:20:48,313 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.AbstractRpcClient(357): Not trying to connect to 2dff3a36d44f:39663 this server is in the failed servers list 2024-12-09T11:20:48,314 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1, error=org.apache.hadoop.hbase.ipc.FailedServerException: Call 
to address=2dff3a36d44f:39663 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 2dff3a36d44f:39663 2024-12-09T11:20:48,314 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1 is org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 2dff3a36d44f:39663 2024-12-09T11:20:48,314 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncRegionLocatorHelper(88): Try removing region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1 from cache 2024-12-09T11:20:48,346 DEBUG [2dff3a36d44f:42781 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=2, allServersCount=2 2024-12-09T11:20:48,347 DEBUG [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(204): Hosts are {2dff3a36d44f=0} racks are {/default-rack=0} 2024-12-09T11:20:48,347 DEBUG [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T11:20:48,347 DEBUG [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T11:20:48,347 DEBUG [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T11:20:48,347 DEBUG [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T11:20:48,347 INFO [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T11:20:48,347 INFO [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T11:20:48,347 DEBUG [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T11:20:48,347 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2dff3a36d44f,46367,1733743228871 2024-12-09T11:20:48,349 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2dff3a36d44f,46367,1733743228871, state=OPENING 2024-12-09T11:20:48,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:20:48,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:20:48,353 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T11:20:48,353 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:20:48,353 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2dff3a36d44f,46367,1733743228871}] 2024-12-09T11:20:48,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:20:48,353 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:20:48,354 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:20:48,507 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T11:20:48,509 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38773, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T11:20:48,513 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T11:20:48,513 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:48,514 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-09T11:20:48,516 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C46367%2C1733743228871.meta, suffix=.meta, logDir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,46367,1733743228871, archiveDir=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/oldWALs, maxLogs=32 2024-12-09T11:20:48,530 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,46367,1733743228871/2dff3a36d44f%2C46367%2C1733743228871.meta.1733743248516.meta, exclude list is [], retry=0 2024-12-09T11:20:48,533 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:48,533 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:48,533 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:48,538 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,46367,1733743228871/2dff3a36d44f%2C46367%2C1733743228871.meta.1733743248516.meta 2024-12-09T11:20:48,538 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:39935:39935),(127.0.0.1/127.0.0.1:43581:43581)] 2024-12-09T11:20:48,538 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:20:48,539 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T11:20:48,539 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T11:20:48,539 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T11:20:48,539 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T11:20:48,539 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:48,540 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T11:20:48,540 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T11:20:48,542 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T11:20:48,544 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T11:20:48,544 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:48,544 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:20:48,545 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T11:20:48,546 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T11:20:48,546 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:48,546 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:20:48,546 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T11:20:48,547 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T11:20:48,547 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:48,548 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:20:48,548 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T11:20:48,549 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T11:20:48,549 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:48,549 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:20:48,549 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T11:20:48,550 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740 2024-12-09T11:20:48,551 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740 2024-12-09T11:20:48,552 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/recovered.edits/0000000000000000018 2024-12-09T11:20:48,554 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/recovered.edits/0000000000000000018: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:48,557 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5793): Applied 40, skipped 0, firstSequenceIdInLog=4, maxSequenceIdInLog=18, path=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/recovered.edits/0000000000000000018 2024-12-09T11:20:48,558 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.67 KB heapSize=9.66 KB 2024-12-09T11:20:48,586 DEBUG 
[RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/.tmp/info/c0963490e8794eb094b8b6c1a8dace18 is 205, key is testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3./info:regioninfo/1733743247582/Put/seqid=0 2024-12-09T11:20:48,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741894_1072 (size=11177) 2024-12-09T11:20:48,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741894_1072 (size=11177) 2024-12-09T11:20:48,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741894_1072 (size=11177) 2024-12-09T11:20:48,604 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.46 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/.tmp/info/c0963490e8794eb094b8b6c1a8dace18 2024-12-09T11:20:48,634 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/.tmp/ns/cd22ac6b822f439f97fa392ddd56dd5e is 43, key is default/ns:d/1733743230945/Put/seqid=0 2024-12-09T11:20:48,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741895_1073 (size=5153) 2024-12-09T11:20:48,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741895_1073 (size=5153) 2024-12-09T11:20:48,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741895_1073 (size=5153) 2024-12-09T11:20:48,657 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/.tmp/ns/cd22ac6b822f439f97fa392ddd56dd5e 2024-12-09T11:20:48,679 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/.tmp/table/de4adee5dd0343718993ac9d85a2c796 is 78, key is testReplayEditsAfterRegionMovedWithMultiCF/table:state/1733743245400/Put/seqid=0 2024-12-09T11:20:48,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741896_1074 (size=5431) 2024-12-09T11:20:48,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741896_1074 (size=5431) 2024-12-09T11:20:48,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741896_1074 
(size=5431) 2024-12-09T11:20:48,694 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=148 B at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/.tmp/table/de4adee5dd0343718993ac9d85a2c796 2024-12-09T11:20:48,701 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/.tmp/info/c0963490e8794eb094b8b6c1a8dace18 as hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/info/c0963490e8794eb094b8b6c1a8dace18 2024-12-09T11:20:48,708 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/info/c0963490e8794eb094b8b6c1a8dace18, entries=36, sequenceid=18, filesize=10.9 K 2024-12-09T11:20:48,710 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/.tmp/ns/cd22ac6b822f439f97fa392ddd56dd5e as hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/ns/cd22ac6b822f439f97fa392ddd56dd5e 2024-12-09T11:20:48,716 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/ns/cd22ac6b822f439f97fa392ddd56dd5e, entries=2, sequenceid=18, filesize=5.0 K 2024-12-09T11:20:48,717 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/.tmp/table/de4adee5dd0343718993ac9d85a2c796 as hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/table/de4adee5dd0343718993ac9d85a2c796 2024-12-09T11:20:48,725 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/table/de4adee5dd0343718993ac9d85a2c796, entries=2, sequenceid=18, filesize=5.3 K 2024-12-09T11:20:48,725 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~5.67 KB/5811, heapSize ~9.37 KB/9592, currentSize=0 B/0 for 1588230740 in 167ms, sequenceid=18, compaction requested=false; wal=null 2024-12-09T11:20:48,725 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T11:20:48,726 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5420): Deleted recovered.edits 
file=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/recovered.edits/0000000000000000018 2024-12-09T11:20:48,728 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T11:20:48,728 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T11:20:48,729 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T11:20:48,731 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T11:20:48,734 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/recovered.edits/18.seqid, newMaxSeqId=18, maxSeqId=1 2024-12-09T11:20:48,735 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=19; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60140903, jitterRate=-0.10383071005344391}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T11:20:48,735 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T11:20:48,736 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733743248540Writing region info on filesystem at 1733743248540Initializing all the Stores at 1733743248541 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743248541Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743248542 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743248542Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL 
=> 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743248542Obtaining lock to block concurrent updates at 1733743248558 (+16 ms)Preparing flush snapshotting stores in 1588230740 at 1733743248558Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=5811, getHeapSize=9832, getOffHeapSize=0, getCellsCount=40 at 1733743248558Flushing stores of hbase:meta,,1.1588230740 at 1733743248558Flushing 1588230740/info: creating writer at 1733743248558Flushing 1588230740/info: appending metadata at 1733743248585 (+27 ms)Flushing 1588230740/info: closing flushed file at 1733743248585Flushing 1588230740/ns: creating writer at 1733743248611 (+26 ms)Flushing 1588230740/ns: appending metadata at 1733743248634 (+23 ms)Flushing 1588230740/ns: closing flushed file at 1733743248634Flushing 1588230740/table: creating writer at 1733743248663 (+29 ms)Flushing 1588230740/table: appending metadata at 1733743248679 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733743248679Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d471524: reopening flushed file at 1733743248700 (+21 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@77e65d9e: reopening flushed file at 1733743248709 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@500cfb8d: reopening flushed file at 1733743248716 (+7 ms)Finished flush of dataSize ~5.67 KB/5811, heapSize ~9.37 KB/9592, currentSize=0 B/0 for 1588230740 in 167ms, sequenceid=18, compaction requested=false; wal=null at 1733743248725 (+9 ms)Cleaning up temporary data from old regions at 1733743248728 (+3 ms)Running coprocessor post-open hooks at 1733743248735 (+7 ms)Region opened successfully at 1733743248736 (+1 ms) 2024-12-09T11:20:48,738 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=17, masterSystemTime=1733743248507 2024-12-09T11:20:48,740 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T11:20:48,740 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=17}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T11:20:48,741 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=19, regionLocation=2dff3a36d44f,46367,1733743228871 2024-12-09T11:20:48,742 INFO [PEWorker-1 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2dff3a36d44f,46367,1733743228871, state=OPEN 2024-12-09T11:20:48,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:20:48,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:20:48,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:20:48,744 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:20:48,744 DEBUG [PEWorker-1 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=17, ppid=16, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2dff3a36d44f,46367,1733743228871 2024-12-09T11:20:48,744 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:20:48,744 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:20:48,747 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=16 2024-12-09T11:20:48,747 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=16, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2dff3a36d44f,46367,1733743228871 in 391 msec 2024-12-09T11:20:48,753 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=13 2024-12-09T11:20:48,753 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=13, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 554 msec 2024-12-09T11:20:48,754 INFO [PEWorker-2 {}] procedure.ServerCrashProcedure(207): 2dff3a36d44f,39663,1733743228795 had 2 regions 2024-12-09T11:20:48,755 INFO [PEWorker-2 {}] procedure.ServerCrashProcedure(339): Splitting WALs pid=13, state=RUNNABLE:SERVER_CRASH_SPLIT_LOGS, hasLock=true; ServerCrashProcedure 2dff3a36d44f,39663,1733743228795, splitWal=true, meta=true, isMeta: false 2024-12-09T11:20:48,757 INFO [PEWorker-2 {}] master.SplitWALManager(105): 2dff3a36d44f,39663,1733743228795 WAL count=1, meta=false 2024-12-09T11:20:48,757 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=18, ppid=13, state=RUNNABLE:ACQUIRE_SPLIT_WAL_WORKER, hasLock=false; SplitWALProcedure 2dff3a36d44f%2C39663%2C1733743228795.1733743230365}] 2024-12-09T11:20:48,759 DEBUG [PEWorker-4 {}] master.SplitWALManager(158): Acquired split WAL worker=2dff3a36d44f,46259,1733743228656 2024-12-09T11:20:48,760 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE, hasLock=false; SplitWALRemoteProcedure 2dff3a36d44f%2C39663%2C1733743228795.1733743230365, worker=2dff3a36d44f,46259,1733743228656}] 2024-12-09T11:20:48,821 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:20:48,822 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,46367,1733743228871, seqNum=-1] 2024-12-09T11:20:48,822 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:20:48,825 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53864, version=3.0.0-beta-2-SNAPSHOT, sasl=false, 
ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:20:48,913 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46259 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SplitWALCallable, pid=19 2024-12-09T11:20:48,933 INFO [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(299): Splitting hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.1733743230365, size=1.4 K (1407bytes) 2024-12-09T11:20:48,933 INFO [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-1 {event_type=RS_LOG_REPLAY, pid=19}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.1733743230365 2024-12-09T11:20:48,934 INFO [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-1 {event_type=RS_LOG_REPLAY, pid=19}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.1733743230365 after 1ms 2024-12-09T11:20:48,936 DEBUG [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.1733743230365: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:48,937 INFO [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(310): Open hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.1733743230365 took 5ms 2024-12-09T11:20:48,940 DEBUG [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(352): Last flushed sequenceid for 4dc5ca2e3dd0f6286d8d8a4977d489a3: last_flushed_sequence_id: 12 store_sequence_id { family_name: "cf1" sequence_id: 12 } store_sequence_id { family_name: "cf2" sequence_id: 12 } 2024-12-09T11:20:48,940 DEBUG [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.1733743230365 so closing down 2024-12-09T11:20:48,940 DEBUG [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-09T11:20:48,941 INFO [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.OutputSink(145): 3 split writer threads finished 2024-12-09T11:20:48,941 INFO [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(425): Processed 6 edits across 0 Regions in 4 ms; skipped=6; 
WAL=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.1733743230365, size=1.4 K, length=1407, corrupted=false, cancelled=false 2024-12-09T11:20:48,941 DEBUG [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(428): Completed split of hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.1733743230365, journal: Splitting hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.1733743230365, size=1.4 K (1407bytes) at 1733743248933Finishing writing output for hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.1733743230365 so closing down at 1733743248940 (+7 ms)3 split writer threads finished at 1733743248941 (+1 ms)Processed 6 edits across 0 Regions in 4 ms; skipped=6; WAL=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.1733743230365, size=1.4 K, length=1407, corrupted=false, cancelled=false at 1733743248941 2024-12-09T11:20:48,941 DEBUG [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-1 {event_type=RS_LOG_REPLAY, pid=19}] regionserver.SplitLogWorker(218): Done splitting WAL hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.1733743230365 2024-12-09T11:20:48,941 DEBUG [RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0-1 {event_type=RS_LOG_REPLAY, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-09T11:20:48,941 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.AbstractRpcClient(357): Not trying to connect to 2dff3a36d44f:39663 this server is in the failed servers list 2024-12-09T11:20:48,942 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,39663,1733743228795, seqNum=12 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,39663,1733743228795, seqNum=12, error=org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=2dff3a36d44f:39663 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 2dff3a36d44f:39663 2024-12-09T11:20:48,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42781 {}] master.HMaster(4169): Remote procedure done, pid=19 2024-12-09T11:20:48,942 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,39663,1733743228795, seqNum=12 is org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 2dff3a36d44f:39663 2024-12-09T11:20:48,942 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.AsyncRegionLocatorHelper(88): Try removing 
region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,39663,1733743228795, seqNum=12 from cache 2024-12-09T11:20:48,946 INFO [PEWorker-1 {}] wal.WALSplitUtil(143): Moved hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting/2dff3a36d44f%2C39663%2C1733743228795.1733743230365 to hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/oldWALs 2024-12-09T11:20:48,949 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-12-09T11:20:48,949 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; SplitWALRemoteProcedure 2dff3a36d44f%2C39663%2C1733743228795.1733743230365, worker=2dff3a36d44f,46259,1733743228656 in 186 msec 2024-12-09T11:20:48,955 DEBUG [PEWorker-5 {}] master.SplitWALManager(172): Release split WAL worker=2dff3a36d44f,46259,1733743228656 2024-12-09T11:20:48,959 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=18, resume processing ppid=13 2024-12-09T11:20:48,959 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, ppid=13, state=SUCCESS, hasLock=false; SplitWALProcedure 2dff3a36d44f%2C39663%2C1733743228795.1733743230365, worker=2dff3a36d44f,46259,1733743228656 in 198 msec 2024-12-09T11:20:48,961 INFO [PEWorker-2 {}] master.SplitLogManager(171): hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/WALs/2dff3a36d44f,39663,1733743228795-splitting dir is empty, no logs to split. 2024-12-09T11:20:48,961 INFO [PEWorker-2 {}] master.SplitWALManager(105): 2dff3a36d44f,39663,1733743228795 WAL count=0, meta=false 2024-12-09T11:20:48,961 DEBUG [PEWorker-2 {}] procedure.ServerCrashProcedure(329): Check if 2dff3a36d44f,39663,1733743228795 WAL splitting is done? 
wals=0, meta=false 2024-12-09T11:20:48,964 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, ASSIGN}] 2024-12-09T11:20:48,965 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=20, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, ASSIGN 2024-12-09T11:20:48,967 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=20, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, ASSIGN; state=OPEN, location=null; forceNewPlan=true, retain=false 2024-12-09T11:20:49,117 DEBUG [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(204): Hosts are {2dff3a36d44f=0} racks are {/default-rack=0} 2024-12-09T11:20:49,117 DEBUG [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T11:20:49,117 DEBUG [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T11:20:49,118 DEBUG [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T11:20:49,118 DEBUG [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T11:20:49,118 INFO [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T11:20:49,118 INFO [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T11:20:49,118 DEBUG [2dff3a36d44f:42781 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T11:20:49,118 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=4dc5ca2e3dd0f6286d8d8a4977d489a3, regionState=OPENING, regionLocation=2dff3a36d44f,46259,1733743228656 2024-12-09T11:20:49,119 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server 2dff3a36d44f:39663 org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: 2dff3a36d44f/172.17.0.3:39663 Caused by: java.net.ConnectException: finishConnect(..) failed: Connection refused at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.newConnectException0(Errors.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.handleConnectErrno(Errors.java:131) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Socket.finishConnect(Socket.java:359) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.doFinishConnect(AbstractEpollChannel.java:715) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:692) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:20:49,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1, error=java.net.ConnectException: Call to address=2dff3a36d44f:39663 failed on connection exception: org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: 2dff3a36d44f/172.17.0.3:39663 2024-12-09T11:20:49,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1 is java.net.ConnectException: finishConnect(..) failed: Connection refused 2024-12-09T11:20:49,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(88): Try removing region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39663,1733743228795, seqNum=-1 from cache 2024-12-09T11:20:49,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.FailedServers(52): Added failed server with address 2dff3a36d44f:39663 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) 
failed: Connection refused: 2dff3a36d44f/172.17.0.3:39663 2024-12-09T11:20:49,157 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,39663,1733743228795, seqNum=18] 2024-12-09T11:20:49,158 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.AbstractRpcClient(357): Not trying to connect to 2dff3a36d44f:39663 this server is in the failed servers list 2024-12-09T11:20:49,158 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,39663,1733743228795, seqNum=18 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,39663,1733743228795, seqNum=18, error=org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=2dff3a36d44f:39663 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 2dff3a36d44f:39663 2024-12-09T11:20:49,159 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,39663,1733743228795, seqNum=18 is org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 2dff3a36d44f:39663 2024-12-09T11:20:49,159 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,39663,1733743228795, seqNum=18 from cache 2024-12-09T11:20:49,231 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:20:49,231 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,46367,1733743228871, seqNum=-1] 2024-12-09T11:20:49,231 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:20:49,233 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49719, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:20:49,236 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, ASSIGN because future has completed 2024-12-09T11:20:49,237 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4dc5ca2e3dd0f6286d8d8a4977d489a3, server=2dff3a36d44f,46259,1733743228656}] 2024-12-09T11:20:49,395 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] handler.AssignRegionHandler(132): Open 
testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:49,395 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7752): Opening region: {ENCODED => 4dc5ca2e3dd0f6286d8d8a4977d489a3, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:20:49,396 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:49,396 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:49,396 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7794): checking encryption for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:49,396 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7797): checking classloading for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:49,402 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:49,403 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4dc5ca2e3dd0f6286d8d8a4977d489a3 columnFamilyName cf1 2024-12-09T11:20:49,403 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:49,417 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1/a20bec56cc5d45e9aad02ceb155c838b 2024-12-09T11:20:49,417 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(327): Store=4dc5ca2e3dd0f6286d8d8a4977d489a3/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:49,417 INFO 
[StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:49,418 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4dc5ca2e3dd0f6286d8d8a4977d489a3 columnFamilyName cf2 2024-12-09T11:20:49,418 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:49,427 DEBUG [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/cf2/f3160b05ac3549b08bbc5a8660ef2009 2024-12-09T11:20:49,428 INFO [StoreOpener-4dc5ca2e3dd0f6286d8d8a4977d489a3-1 {}] regionserver.HStore(327): Store=4dc5ca2e3dd0f6286d8d8a4977d489a3/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:49,428 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1038): replaying wal for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:49,429 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:49,431 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:49,432 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1048): stopping wal replay for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:49,432 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1060): Cleaning up temporary data for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:49,433 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-09T11:20:49,435 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1093): writing seq id for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:49,436 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1114): Opened 4dc5ca2e3dd0f6286d8d8a4977d489a3; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59184269, jitterRate=-0.11808566749095917}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-09T11:20:49,436 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:20:49,437 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1006): Region open journal for 4dc5ca2e3dd0f6286d8d8a4977d489a3: Running coprocessor pre-open hook at 1733743249396Writing region info on filesystem at 1733743249396Initializing all the Stores at 1733743249398 (+2 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743249398Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743249402 (+4 ms)Cleaning up temporary data from old regions at 1733743249432 (+30 ms)Running coprocessor post-open hooks at 1733743249436 (+4 ms)Region opened successfully at 1733743249437 (+1 ms) 2024-12-09T11:20:49,440 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., pid=21, masterSystemTime=1733743249390 2024-12-09T11:20:49,443 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:20:49,443 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 
2024-12-09T11:20:49,444 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=4dc5ca2e3dd0f6286d8d8a4977d489a3, regionState=OPEN, openSeqNum=18, regionLocation=2dff3a36d44f,46259,1733743228656 2024-12-09T11:20:49,447 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=21, ppid=20, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4dc5ca2e3dd0f6286d8d8a4977d489a3, server=2dff3a36d44f,46259,1733743228656 because future has completed 2024-12-09T11:20:49,455 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=21, resume processing ppid=20 2024-12-09T11:20:49,455 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, ppid=20, state=SUCCESS, hasLock=false; OpenRegionProcedure 4dc5ca2e3dd0f6286d8d8a4977d489a3, server=2dff3a36d44f,46259,1733743228656 in 211 msec 2024-12-09T11:20:49,459 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(291): removed crashed server 2dff3a36d44f,39663,1733743228795 after splitting done 2024-12-09T11:20:49,461 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=13 2024-12-09T11:20:49,461 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=13, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=4dc5ca2e3dd0f6286d8d8a4977d489a3, ASSIGN in 491 msec 2024-12-09T11:20:49,465 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; ServerCrashProcedure 2dff3a36d44f,39663,1733743228795, splitWal=true, meta=true in 1.5350 sec 2024-12-09T11:20:49,477 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3., hostname=2dff3a36d44f,46259,1733743228656, seqNum=18] 2024-12-09T11:20:49,495 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterRegionMovedWithMultiCF Thread=404 (was 408), OpenFileDescriptor=1042 (was 1013) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=392 (was 374) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=460 (was 718) 2024-12-09T11:20:49,497 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1042 is superior to 1024 2024-12-09T11:20:49,510 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterPartialFlush Thread=404, OpenFileDescriptor=1042, MaxFileDescriptor=1048576, SystemLoadAverage=392, ProcessCount=11, AvailableMemoryMB=458 2024-12-09T11:20:49,510 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1042 is superior to 1024 2024-12-09T11:20:49,526 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:49,528 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:49,529 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T11:20:49,532 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-72643509, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/hregion-72643509, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:20:49,548 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-72643509/hregion-72643509.1733743249533, exclude list is [], retry=0 2024-12-09T11:20:49,556 DEBUG [AsyncFSWAL-20-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:49,557 DEBUG [AsyncFSWAL-20-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:49,559 DEBUG [AsyncFSWAL-20-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:49,564 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-72643509/hregion-72643509.1733743249533 2024-12-09T11:20:49,568 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:39935:39935)] 2024-12-09T11:20:49,568 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 36a6dde952053c47d3fade89f97bc472, NAME => 'testReplayEditsWrittenViaHRegion,,1733743249527.36a6dde952053c47d3fade89f97bc472.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40493/hbase 2024-12-09T11:20:49,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741898_1076 (size=67) 2024-12-09T11:20:49,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741898_1076 (size=67) 2024-12-09T11:20:49,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741898_1076 (size=67) 2024-12-09T11:20:49,588 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733743249527.36a6dde952053c47d3fade89f97bc472.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:49,590 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:49,597 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 36a6dde952053c47d3fade89f97bc472 columnFamilyName a 2024-12-09T11:20:49,597 DEBUG [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:49,598 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] regionserver.HStore(327): Store=36a6dde952053c47d3fade89f97bc472/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:49,598 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:49,599 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 36a6dde952053c47d3fade89f97bc472 columnFamilyName b 2024-12-09T11:20:49,599 DEBUG [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:49,600 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] regionserver.HStore(327): Store=36a6dde952053c47d3fade89f97bc472/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:49,600 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:49,601 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 36a6dde952053c47d3fade89f97bc472 columnFamilyName c 2024-12-09T11:20:49,601 DEBUG [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:49,602 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] regionserver.HStore(327): Store=36a6dde952053c47d3fade89f97bc472/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:49,602 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:49,603 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:49,603 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:49,604 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 
36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:49,604 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:49,605 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-09T11:20:49,606 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:49,609 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:20:49,609 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 36a6dde952053c47d3fade89f97bc472; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63926642, jitterRate=-0.047418802976608276}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-09T11:20:49,610 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 36a6dde952053c47d3fade89f97bc472: Writing region info on filesystem at 1733743249588Initializing all the Stores at 1733743249589 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743249589Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743249590 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743249590Cleaning up temporary data from old regions at 1733743249604 (+14 ms)Region opened successfully at 1733743249610 (+6 ms) 2024-12-09T11:20:49,610 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 36a6dde952053c47d3fade89f97bc472, disabling compactions & flushes 2024-12-09T11:20:49,610 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733743249527.36a6dde952053c47d3fade89f97bc472. 2024-12-09T11:20:49,610 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733743249527.36a6dde952053c47d3fade89f97bc472. 2024-12-09T11:20:49,610 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733743249527.36a6dde952053c47d3fade89f97bc472. 
after waiting 0 ms 2024-12-09T11:20:49,610 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733743249527.36a6dde952053c47d3fade89f97bc472. 2024-12-09T11:20:49,611 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733743249527.36a6dde952053c47d3fade89f97bc472. 2024-12-09T11:20:49,611 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 36a6dde952053c47d3fade89f97bc472: Waiting for close lock at 1733743249610Disabling compacts and flushes for region at 1733743249610Disabling writes for close at 1733743249610Writing region close event to WAL at 1733743249611 (+1 ms)Closed at 1733743249611 2024-12-09T11:20:49,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741897_1075 (size=95) 2024-12-09T11:20:49,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741897_1075 (size=95) 2024-12-09T11:20:49,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741897_1075 (size=95) 2024-12-09T11:20:49,622 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-09T11:20:49,622 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-72643509:(num 1733743249533) 2024-12-09T11:20:49,622 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-09T11:20:49,625 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:20:49,643 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526/wal.1733743249625, exclude list is [], retry=0 2024-12-09T11:20:49,646 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:49,646 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:49,647 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:49,648 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526/wal.1733743249625 2024-12-09T11:20:49,648 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39935:39935),(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:43581:43581)] 2024-12-09T11:20:49,649 DEBUG [Time-limited test {}] 
regionserver.HRegion(7752): Opening region: {ENCODED => 36a6dde952053c47d3fade89f97bc472, NAME => 'testReplayEditsWrittenViaHRegion,,1733743249527.36a6dde952053c47d3fade89f97bc472.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:20:49,649 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733743249527.36a6dde952053c47d3fade89f97bc472.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:49,649 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:49,649 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:49,650 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:49,651 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 36a6dde952053c47d3fade89f97bc472 columnFamilyName a 2024-12-09T11:20:49,651 DEBUG [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:49,652 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] regionserver.HStore(327): Store=36a6dde952053c47d3fade89f97bc472/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:49,652 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:49,653 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 36a6dde952053c47d3fade89f97bc472 columnFamilyName b 2024-12-09T11:20:49,653 DEBUG [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:49,653 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] regionserver.HStore(327): Store=36a6dde952053c47d3fade89f97bc472/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:49,654 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:49,654 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 36a6dde952053c47d3fade89f97bc472 columnFamilyName c 2024-12-09T11:20:49,654 DEBUG [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:49,655 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] regionserver.HStore(327): Store=36a6dde952053c47d3fade89f97bc472/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:49,655 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:49,656 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:49,657 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:49,658 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:49,658 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:49,659 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
2024-12-09T11:20:49,660 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:49,660 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 36a6dde952053c47d3fade89f97bc472; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72782980, jitterRate=0.08455091714859009}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-09T11:20:49,661 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 36a6dde952053c47d3fade89f97bc472: Writing region info on filesystem at 1733743249649Initializing all the Stores at 1733743249650 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743249650Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743249650Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743249650Cleaning up temporary data from old regions at 1733743249658 (+8 ms)Region opened successfully at 1733743249661 (+3 ms) 2024-12-09T11:20:49,703 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 36a6dde952053c47d3fade89f97bc472 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-12-09T11:20:49,727 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/.tmp/a/9fa1fef062ee4caab7465a442ccbdcf0 is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1733743249661/Put/seqid=0 2024-12-09T11:20:49,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741900_1078 (size=5958) 2024-12-09T11:20:49,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741900_1078 (size=5958) 2024-12-09T11:20:49,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741900_1078 (size=5958) 2024-12-09T11:20:49,738 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/.tmp/a/9fa1fef062ee4caab7465a442ccbdcf0 2024-12-09T11:20:49,766 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/.tmp/b/b6df1bb8a02e481f9f47499419db34fd is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733743249682/Put/seqid=0 2024-12-09T11:20:49,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741901_1079 (size=5958) 2024-12-09T11:20:49,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741901_1079 (size=5958) 2024-12-09T11:20:49,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741901_1079 (size=5958) 2024-12-09T11:20:49,783 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/.tmp/b/b6df1bb8a02e481f9f47499419db34fd 2024-12-09T11:20:49,809 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/.tmp/c/817997ddcf394a31982f60be5779a42a is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1733743249691/Put/seqid=0 2024-12-09T11:20:49,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741902_1080 (size=5958) 2024-12-09T11:20:49,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741902_1080 (size=5958) 2024-12-09T11:20:49,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741902_1080 (size=5958) 2024-12-09T11:20:49,818 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/.tmp/c/817997ddcf394a31982f60be5779a42a 2024-12-09T11:20:49,825 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/.tmp/a/9fa1fef062ee4caab7465a442ccbdcf0 as hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/a/9fa1fef062ee4caab7465a442ccbdcf0 2024-12-09T11:20:49,832 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/a/9fa1fef062ee4caab7465a442ccbdcf0, entries=10, sequenceid=33, filesize=5.8 K 2024-12-09T11:20:49,833 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/.tmp/b/b6df1bb8a02e481f9f47499419db34fd as hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/b/b6df1bb8a02e481f9f47499419db34fd 2024-12-09T11:20:49,849 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/b/b6df1bb8a02e481f9f47499419db34fd, entries=10, sequenceid=33, filesize=5.8 K 2024-12-09T11:20:49,850 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/.tmp/c/817997ddcf394a31982f60be5779a42a as hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/c/817997ddcf394a31982f60be5779a42a 2024-12-09T11:20:49,856 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/c/817997ddcf394a31982f60be5779a42a, entries=10, sequenceid=33, filesize=5.8 K 2024-12-09T11:20:49,858 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 36a6dde952053c47d3fade89f97bc472 in 156ms, sequenceid=33, compaction requested=false 2024-12-09T11:20:49,858 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 36a6dde952053c47d3fade89f97bc472: 2024-12-09T11:20:49,858 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 36a6dde952053c47d3fade89f97bc472, disabling compactions & flushes 2024-12-09T11:20:49,858 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733743249527.36a6dde952053c47d3fade89f97bc472. 2024-12-09T11:20:49,858 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733743249527.36a6dde952053c47d3fade89f97bc472. 2024-12-09T11:20:49,858 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733743249527.36a6dde952053c47d3fade89f97bc472. after waiting 0 ms 2024-12-09T11:20:49,858 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733743249527.36a6dde952053c47d3fade89f97bc472. 2024-12-09T11:20:49,888 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733743249527.36a6dde952053c47d3fade89f97bc472. 
2024-12-09T11:20:49,888 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 36a6dde952053c47d3fade89f97bc472: Waiting for close lock at 1733743249858Disabling compacts and flushes for region at 1733743249858Disabling writes for close at 1733743249858Writing region close event to WAL at 1733743249888 (+30 ms)Closed at 1733743249888 2024-12-09T11:20:49,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741899_1077 (size=3384) 2024-12-09T11:20:49,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741899_1077 (size=3384) 2024-12-09T11:20:49,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741899_1077 (size=3384) 2024-12-09T11:20:49,897 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526/wal.1733743249625 not finished, retry = 0 2024-12-09T11:20:50,003 DEBUG [Time-limited test {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/b/b6df1bb8a02e481f9f47499419db34fd to hdfs://localhost:40493/hbase/archive/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/b/b6df1bb8a02e481f9f47499419db34fd 2024-12-09T11:20:50,021 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:40493/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526/wal.1733743249625, size=3.3 K (3384bytes) 2024-12-09T11:20:50,021 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40493/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526/wal.1733743249625 2024-12-09T11:20:50,021 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40493/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526/wal.1733743249625 after 0ms 2024-12-09T11:20:50,024 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526/wal.1733743249625: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:50,024 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:40493/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526/wal.1733743249625 took 4ms 2024-12-09T11:20:50,027 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:40493/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526/wal.1733743249625 so closing down 2024-12-09T11:20:50,027 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-09T11:20:50,028 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733743249625.temp 2024-12-09T11:20:50,030 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer 
path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/recovered.edits/0000000000000000003-wal.1733743249625.temp 2024-12-09T11:20:50,031 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-09T11:20:50,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741903_1081 (size=2944) 2024-12-09T11:20:50,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741903_1081 (size=2944) 2024-12-09T11:20:50,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741903_1081 (size=2944) 2024-12-09T11:20:50,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741836_1012 (size=3561) 2024-12-09T11:20:50,445 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/recovered.edits/0000000000000000003-wal.1733743249625.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-12-09T11:20:50,447 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/recovered.edits/0000000000000000003-wal.1733743249625.temp to hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/recovered.edits/0000000000000000032 2024-12-09T11:20:50,447 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 422 ms; skipped=2; WAL=hdfs://localhost:40493/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526/wal.1733743249625, size=3.3 K, length=3384, corrupted=false, cancelled=false 2024-12-09T11:20:50,447 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:40493/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526/wal.1733743249625, journal: Splitting hdfs://localhost:40493/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526/wal.1733743249625, size=3.3 K (3384bytes) at 1733743250021Finishing writing output for hdfs://localhost:40493/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526/wal.1733743249625 so closing down at 1733743250027 (+6 ms)Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/recovered.edits/0000000000000000003-wal.1733743249625.temp at 1733743250030 (+3 ms)3 split writer threads finished at 1733743250031 (+1 ms)Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/recovered.edits/0000000000000000003-wal.1733743249625.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1733743250445 (+414 ms)Rename recovered edits hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/recovered.edits/0000000000000000003-wal.1733743249625.temp to hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/recovered.edits/0000000000000000032 at 1733743250447 (+2 
ms)Processed 32 edits across 1 Regions in 422 ms; skipped=2; WAL=hdfs://localhost:40493/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526/wal.1733743249625, size=3.3 K, length=3384, corrupted=false, cancelled=false at 1733743250447 2024-12-09T11:20:50,449 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:40493/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526/wal.1733743249625 to hdfs://localhost:40493/hbase/oldWALs/wal.1733743249625 2024-12-09T11:20:50,450 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/recovered.edits/0000000000000000032 2024-12-09T11:20:50,450 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-09T11:20:50,452 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:20:50,471 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526/wal.1733743250453, exclude list is [], retry=0 2024-12-09T11:20:50,478 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:50,478 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:50,478 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:50,499 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526/wal.1733743250453 2024-12-09T11:20:50,502 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39935:39935)] 2024-12-09T11:20:50,503 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 36a6dde952053c47d3fade89f97bc472, NAME => 'testReplayEditsWrittenViaHRegion,,1733743249527.36a6dde952053c47d3fade89f97bc472.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:20:50,503 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733743249527.36a6dde952053c47d3fade89f97bc472.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:50,503 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:50,503 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 36a6dde952053c47d3fade89f97bc472 
2024-12-09T11:20:50,505 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:50,506 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 36a6dde952053c47d3fade89f97bc472 columnFamilyName a 2024-12-09T11:20:50,507 DEBUG [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:50,529 DEBUG [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/a/9fa1fef062ee4caab7465a442ccbdcf0 2024-12-09T11:20:50,529 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] regionserver.HStore(327): Store=36a6dde952053c47d3fade89f97bc472/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:50,529 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:50,532 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 36a6dde952053c47d3fade89f97bc472 columnFamilyName b 2024-12-09T11:20:50,532 DEBUG [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:50,532 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] regionserver.HStore(327): Store=36a6dde952053c47d3fade89f97bc472/b, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:50,533 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:50,534 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 36a6dde952053c47d3fade89f97bc472 columnFamilyName c 2024-12-09T11:20:50,534 DEBUG [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:50,545 DEBUG [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/c/817997ddcf394a31982f60be5779a42a 2024-12-09T11:20:50,545 INFO [StoreOpener-36a6dde952053c47d3fade89f97bc472-1 {}] regionserver.HStore(327): Store=36a6dde952053c47d3fade89f97bc472/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:50,545 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:50,546 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:50,548 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:50,550 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/recovered.edits/0000000000000000032 2024-12-09T11:20:50,560 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/recovered.edits/0000000000000000032: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:20:50,562 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 20, firstSequenceIdInLog=3, maxSequenceIdInLog=32, path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/recovered.edits/0000000000000000032 
2024-12-09T11:20:50,562 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 36a6dde952053c47d3fade89f97bc472 3/3 column families, dataSize=870 B heapSize=2.31 KB 2024-12-09T11:20:50,580 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/.tmp/b/980036cd6ee940a1ac7f3170589b2f3a is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733743249682/Put/seqid=0 2024-12-09T11:20:50,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741905_1083 (size=5958) 2024-12-09T11:20:50,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741905_1083 (size=5958) 2024-12-09T11:20:50,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741905_1083 (size=5958) 2024-12-09T11:20:50,611 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=32 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/.tmp/b/980036cd6ee940a1ac7f3170589b2f3a 2024-12-09T11:20:50,617 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/.tmp/b/980036cd6ee940a1ac7f3170589b2f3a as hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/b/980036cd6ee940a1ac7f3170589b2f3a 2024-12-09T11:20:50,624 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/b/980036cd6ee940a1ac7f3170589b2f3a, entries=10, sequenceid=32, filesize=5.8 K 2024-12-09T11:20:50,624 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 36a6dde952053c47d3fade89f97bc472 in 62ms, sequenceid=32, compaction requested=false; wal=null 2024-12-09T11:20:50,625 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/recovered.edits/0000000000000000032 2024-12-09T11:20:50,626 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:50,626 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:50,627 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
2024-12-09T11:20:50,628 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 36a6dde952053c47d3fade89f97bc472 2024-12-09T11:20:50,631 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/36a6dde952053c47d3fade89f97bc472/recovered.edits/33.seqid, newMaxSeqId=33, maxSeqId=1 2024-12-09T11:20:50,632 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 36a6dde952053c47d3fade89f97bc472; next sequenceid=34; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59481038, jitterRate=-0.11366346478462219}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-09T11:20:50,632 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 36a6dde952053c47d3fade89f97bc472: Writing region info on filesystem at 1733743250504Initializing all the Stores at 1733743250505 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743250505Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743250505Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743250505Obtaining lock to block concurrent updates at 1733743250562 (+57 ms)Preparing flush snapshotting stores in 36a6dde952053c47d3fade89f97bc472 at 1733743250562Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733743249527.36a6dde952053c47d3fade89f97bc472., syncing WAL and waiting on mvcc, flushsize=dataSize=870, getHeapSize=2320, getOffHeapSize=0, getCellsCount=10 at 1733743250562Flushing stores of testReplayEditsWrittenViaHRegion,,1733743249527.36a6dde952053c47d3fade89f97bc472. 
at 1733743250562Flushing 36a6dde952053c47d3fade89f97bc472/b: creating writer at 1733743250563 (+1 ms)Flushing 36a6dde952053c47d3fade89f97bc472/b: appending metadata at 1733743250579 (+16 ms)Flushing 36a6dde952053c47d3fade89f97bc472/b: closing flushed file at 1733743250579Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2fcfc5f9: reopening flushed file at 1733743250617 (+38 ms)Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 36a6dde952053c47d3fade89f97bc472 in 62ms, sequenceid=32, compaction requested=false; wal=null at 1733743250624 (+7 ms)Cleaning up temporary data from old regions at 1733743250626 (+2 ms)Region opened successfully at 1733743250632 (+6 ms) 2024-12-09T11:20:50,655 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterPartialFlush Thread=411 (was 404) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:44920 [Waiting for operation #39] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:40838 [Waiting for operation #45] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:43952 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741904_1082] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:44190 [Waiting for operation #39] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-32692473-172.17.0.3-1733743223895:blk_1073741904_1082, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:46540 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741904_1082] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-32692473-172.17.0.3-1733743223895:blk_1073741904_1082, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-32692473-172.17.0.3-1733743223895:blk_1073741904_1082, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:39464 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741904_1082] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1110 (was 1042) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=392 (was 392), ProcessCount=11 (was 11), AvailableMemoryMB=437 (was 458) 2024-12-09T11:20:50,656 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1110 is superior to 1024 2024-12-09T11:20:50,669 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterAbortingFlush Thread=411, OpenFileDescriptor=1110, MaxFileDescriptor=1048576, SystemLoadAverage=392, ProcessCount=11, AvailableMemoryMB=436 2024-12-09T11:20:50,669 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1110 is superior to 1024 2024-12-09T11:20:50,685 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:50,689 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:20:50,690 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T11:20:50,693 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-42543980, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/hregion-42543980, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:20:50,714 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-42543980/hregion-42543980.1733743250694, exclude list is [], retry=0 2024-12-09T11:20:50,718 DEBUG [AsyncFSWAL-22-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:50,718 DEBUG [AsyncFSWAL-22-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:50,720 DEBUG [AsyncFSWAL-22-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:50,728 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-42543980/hregion-42543980.1733743250694 2024-12-09T11:20:50,730 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39935:39935)] 2024-12-09T11:20:50,730 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 0eef9f7e6d503a3edde04b4549d99997, NAME => 'testReplayEditsAfterAbortingFlush,,1733743250687.0eef9f7e6d503a3edde04b4549d99997.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterAbortingFlush', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40493/hbase 2024-12-09T11:20:50,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741907_1085 (size=68) 2024-12-09T11:20:50,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741907_1085 (size=68) 2024-12-09T11:20:50,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741907_1085 (size=68) 2024-12-09T11:20:50,751 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733743250687.0eef9f7e6d503a3edde04b4549d99997.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:50,752 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,754 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0eef9f7e6d503a3edde04b4549d99997 columnFamilyName a 2024-12-09T11:20:50,754 DEBUG [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:50,755 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] regionserver.HStore(327): Store=0eef9f7e6d503a3edde04b4549d99997/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:50,755 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,757 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0eef9f7e6d503a3edde04b4549d99997 columnFamilyName b 2024-12-09T11:20:50,757 DEBUG [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:50,758 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] regionserver.HStore(327): Store=0eef9f7e6d503a3edde04b4549d99997/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:50,758 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,759 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0eef9f7e6d503a3edde04b4549d99997 columnFamilyName c 2024-12-09T11:20:50,759 DEBUG [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:50,760 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] regionserver.HStore(327): Store=0eef9f7e6d503a3edde04b4549d99997/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:50,760 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,761 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,761 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,762 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,762 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,763 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-09T11:20:50,764 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,767 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:20:50,767 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 0eef9f7e6d503a3edde04b4549d99997; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59670903, jitterRate=-0.11083425581455231}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-09T11:20:50,768 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 0eef9f7e6d503a3edde04b4549d99997: Writing region info on filesystem at 1733743250751Initializing all the Stores at 1733743250752 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743250752Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743250752Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743250752Cleaning up temporary data from old regions at 1733743250762 (+10 ms)Region opened successfully at 1733743250768 (+6 ms) 2024-12-09T11:20:50,768 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 0eef9f7e6d503a3edde04b4549d99997, disabling compactions & flushes 2024-12-09T11:20:50,768 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1733743250687.0eef9f7e6d503a3edde04b4549d99997. 2024-12-09T11:20:50,768 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1733743250687.0eef9f7e6d503a3edde04b4549d99997. 2024-12-09T11:20:50,768 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1733743250687.0eef9f7e6d503a3edde04b4549d99997. 
after waiting 0 ms 2024-12-09T11:20:50,768 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1733743250687.0eef9f7e6d503a3edde04b4549d99997. 2024-12-09T11:20:50,769 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1733743250687.0eef9f7e6d503a3edde04b4549d99997. 2024-12-09T11:20:50,769 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 0eef9f7e6d503a3edde04b4549d99997: Waiting for close lock at 1733743250768Disabling compacts and flushes for region at 1733743250768Disabling writes for close at 1733743250768Writing region close event to WAL at 1733743250769 (+1 ms)Closed at 1733743250769 2024-12-09T11:20:50,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741906_1084 (size=95) 2024-12-09T11:20:50,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741906_1084 (size=95) 2024-12-09T11:20:50,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741906_1084 (size=95) 2024-12-09T11:20:50,775 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-09T11:20:50,775 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-42543980:(num 1733743250694) 2024-12-09T11:20:50,775 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-09T11:20:50,777 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:20:50,791 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684/wal.1733743250778, exclude list is [], retry=0 2024-12-09T11:20:50,794 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:20:50,794 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:20:50,795 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:20:50,803 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684/wal.1733743250778 2024-12-09T11:20:50,803 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:39935:39935)] 2024-12-09T11:20:50,882 WARN [HBase-Metrics2-1 {}] 
impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-09T11:20:50,883 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 0eef9f7e6d503a3edde04b4549d99997, NAME => 'testReplayEditsAfterAbortingFlush,,1733743250687.0eef9f7e6d503a3edde04b4549d99997.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:20:50,887 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,887 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733743250687.0eef9f7e6d503a3edde04b4549d99997.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:20:50,887 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,887 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,889 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,891 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0eef9f7e6d503a3edde04b4549d99997 columnFamilyName a 2024-12-09T11:20:50,891 DEBUG [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:50,892 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] regionserver.HStore(327): Store=0eef9f7e6d503a3edde04b4549d99997/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:50,892 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,899 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; 
major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0eef9f7e6d503a3edde04b4549d99997 columnFamilyName b 2024-12-09T11:20:50,899 DEBUG [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:50,900 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] regionserver.HStore(327): Store=0eef9f7e6d503a3edde04b4549d99997/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:50,900 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,901 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0eef9f7e6d503a3edde04b4549d99997 columnFamilyName c 2024-12-09T11:20:50,901 DEBUG [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:20:50,902 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] regionserver.HStore(327): Store=0eef9f7e6d503a3edde04b4549d99997/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:20:50,902 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,903 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,909 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,911 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,911 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 
0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,912 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-09T11:20:50,914 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,916 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 0eef9f7e6d503a3edde04b4549d99997; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73320115, jitterRate=0.09255485236644745}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-09T11:20:50,916 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:20:50,916 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 0eef9f7e6d503a3edde04b4549d99997: Running coprocessor pre-open hook at 1733743250887Writing region info on filesystem at 1733743250887Initializing all the Stores at 1733743250889 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743250889Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743250889Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743250889Cleaning up temporary data from old regions at 1733743250911 (+22 ms)Running coprocessor post-open hooks at 1733743250916 (+5 ms)Region opened successfully at 1733743250916 2024-12-09T11:20:50,935 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 0eef9f7e6d503a3edde04b4549d99997 3/3 column families, dataSize=590 B heapSize=2.08 KB 2024-12-09T11:20:50,937 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 0eef9f7e6d503a3edde04b4549d99997/a, retrying num=0 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
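The java.io.IOException "Simulated exception by tests" above is thrown on purpose: testReplayEditsAfterAbortingFlush installs a custom store flusher (AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot) that keeps failing, so the flush aborts and the edits later have to be recovered from the WAL. The sketch below only illustrates that idea in self-contained Java; the FailingFlusher class and the allowFlush flag are hypothetical stand-ins, not the actual HBase test hook.

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.atomic.AtomicBoolean;

    // Self-contained illustration of a flusher whose snapshot step fails on demand,
    // in the spirit of the test's CustomStoreFlusher (all names here are hypothetical).
    final class FailingFlusher {
        // The test flips this once it wants flushes to start succeeding again.
        static final AtomicBoolean allowFlush = new AtomicBoolean(false);

        // Stand-in for flushSnapshot(): either "writes" the snapshot or throws the simulated exception.
        List<String> flushSnapshot(List<String> memstoreSnapshot) throws IOException {
            if (!allowFlush.get()) {
                throw new IOException("Simulated exception by tests");
            }
            return new ArrayList<>(memstoreSnapshot); // pretend these are the new store files
        }

        public static void main(String[] args) {
            FailingFlusher flusher = new FailingFlusher();
            List<String> snapshot = List.of("row1/a:1", "row2/a:2");
            try {
                flusher.flushSnapshot(snapshot);        // first attempt: the flush aborts
            } catch (IOException e) {
                System.out.println("flush aborted: " + e.getMessage());
            }
            allowFlush.set(true);                       // later the test lets the flush go through
            try {
                System.out.println("flushed files: " + flusher.flushSnapshot(snapshot));
            } catch (IOException e) {
                throw new AssertionError("should not fail once allowed", e);
            }
        }
    }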
2024-12-09T11:20:51,938 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 0eef9f7e6d503a3edde04b4549d99997/a, retrying num=1 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:20:52,938 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 0eef9f7e6d503a3edde04b4549d99997/a, retrying num=2 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:20:53,939 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 0eef9f7e6d503a3edde04b4549d99997/a, retrying num=3 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:20:54,940 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 0eef9f7e6d503a3edde04b4549d99997/a, retrying num=4 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
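Between the WARN lines from HStore(850) the timestamps advance by roughly one second while the "retrying num=N" counter increments, i.e. the store retries the failed flush a bounded number of times with a pause in between before giving up. A rough, self-contained sketch of such a retry-and-log loop follows; retryFlush, the retry limit and the pause length are made-up names and values for illustration, not HStore's actual implementation.

    import java.io.IOException;
    import java.util.concurrent.Callable;

    // Retries a flush a fixed number of times, pausing between attempts and logging each
    // failure, then rethrows the last IOException once the limit is reached.
    final class FlushRetry {
        static <T> T retryFlush(Callable<T> flush, int maxRetries, long pauseMillis) throws Exception {
            IOException last = null;
            for (int attempt = 0; attempt <= maxRetries; attempt++) {
                try {
                    return flush.call();
                } catch (IOException e) {
                    last = e;
                    System.err.println("Failed flushing store file, retrying num=" + attempt);
                    Thread.sleep(pauseMillis); // roughly the 1 s gap visible between the WARN lines above
                }
            }
            throw last;
        }

        public static void main(String[] args) throws Exception {
            // An always-failing flush: after the retries are exhausted the exception reaches the caller.
            try {
                retryFlush(() -> { throw new IOException("Simulated exception by tests"); }, 3, 100L);
            } catch (IOException expected) {
                System.out.println("flush gave up: " + expected.getMessage());
            }
        }
    }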
2024-12-09T11:20:55,941 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 0eef9f7e6d503a3edde04b4549d99997/a, retrying num=5 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:20:56,389 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-09T11:20:56,634 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T11:20:56,941 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 0eef9f7e6d503a3edde04b4549d99997/a, retrying num=6 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:20:57,942 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 0eef9f7e6d503a3edde04b4549d99997/a, retrying num=7 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T11:20:58,147 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion 2024-12-09T11:20:58,147 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion Metrics about Tables on a single HBase RegionServer 2024-12-09T11:20:58,148 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T11:20:58,148 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T11:20:58,149 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF 2024-12-09T11:20:58,149 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF Metrics about Tables on a single HBase RegionServer 2024-12-09T11:20:58,149 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush 2024-12-09T11:20:58,149 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush Metrics about Tables on a single HBase RegionServer 2024-12-09T11:20:58,150 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:20:58,150 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T11:20:58,943 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 0eef9f7e6d503a3edde04b4549d99997/a, retrying num=8 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:20:59,943 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 0eef9f7e6d503a3edde04b4549d99997/a, retrying num=9 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
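[Editor's note] The repeated WARN records above come from the test's CustomStoreFlusher (AbstractTestWALReplay.java:619), which throws java.io.IOException("Simulated exception by tests") on every flush attempt, so HStore keeps retrying the store-file flush (the counter reaches "retrying num=9" before the flush gives up) and the test can then exercise WAL replay after an aborted flush. The following is only a minimal, self-contained sketch of that fault-injection pattern; the class, field, and retry-count values are illustrative and are not the actual HBase test code.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * Illustrative sketch (not HBase source): a flusher whose snapshot flush
 * fails while a toggle is set, mimicking the "Simulated exception by tests"
 * seen in the log above.
 */
public class FailingFlusherSketch {

  // Toggle the test would flip to start/stop the simulated failures.
  static final AtomicBoolean throwExceptionWhenFlushing = new AtomicBoolean(false);

  /** Pretend flush: fails while the toggle is set, succeeds otherwise. */
  static void flushSnapshot() throws IOException {
    if (throwExceptionWhenFlushing.get()) {
      // Same message as the WARN stack traces in the log.
      throw new IOException("Simulated exception by tests");
    }
    // ... normal flush work would happen here ...
  }

  public static void main(String[] args) {
    throwExceptionWhenFlushing.set(true);
    int retries = 10; // assumption: a bounded retry budget, as the log's num=0..9 suggests
    for (int attempt = 0; attempt < retries; attempt++) {
      try {
        flushSnapshot();
        return;
      } catch (IOException e) {
        System.out.println("Failed flushing store file, retrying num=" + attempt);
      }
    }
    System.out.println("Flush aborted after " + retries + " attempts");
  }
}

After the last retry the flush status journal is written (next record below) and the test aborts the region with the unflushed edits still only in the WAL, which is what the subsequent split-and-replay sequence recovers.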
2024-12-09T11:20:59,944 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 0eef9f7e6d503a3edde04b4549d99997: 2024-12-09T11:20:59,945 INFO [Time-limited test {}] wal.AbstractTestWALReplay(671): Expected simulated exception when flushing region, region: testReplayEditsAfterAbortingFlush,,1733743250687.0eef9f7e6d503a3edde04b4549d99997. 2024-12-09T11:20:59,960 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 0eef9f7e6d503a3edde04b4549d99997: 2024-12-09T11:20:59,961 INFO [Time-limited test {}] wal.AbstractTestWALReplay(691): Expected exception when flushing region because server is stopped,Aborting flush because server is aborted... 2024-12-09T11:20:59,961 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 0eef9f7e6d503a3edde04b4549d99997, disabling compactions & flushes 2024-12-09T11:20:59,961 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1733743250687.0eef9f7e6d503a3edde04b4549d99997. 2024-12-09T11:20:59,961 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1733743250687.0eef9f7e6d503a3edde04b4549d99997. 2024-12-09T11:20:59,961 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1733743250687.0eef9f7e6d503a3edde04b4549d99997. after waiting 0 ms 2024-12-09T11:20:59,961 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1733743250687.0eef9f7e6d503a3edde04b4549d99997. 2024-12-09T11:20:59,962 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1190 in region testReplayEditsAfterAbortingFlush,,1733743250687.0eef9f7e6d503a3edde04b4549d99997. 2024-12-09T11:20:59,962 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1733743250687.0eef9f7e6d503a3edde04b4549d99997. 
2024-12-09T11:20:59,962 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 0eef9f7e6d503a3edde04b4549d99997: Waiting for close lock at 1733743259961Running coprocessor pre-close hooks at 1733743259961Disabling compacts and flushes for region at 1733743259961Disabling writes for close at 1733743259961Writing region close event to WAL at 1733743259962 (+1 ms)Running coprocessor post-close hooks at 1733743259962Closed at 1733743259962 2024-12-09T11:20:59,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741908_1086 (size=2685) 2024-12-09T11:20:59,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741908_1086 (size=2685) 2024-12-09T11:20:59,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741908_1086 (size=2685) 2024-12-09T11:20:59,986 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:40493/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684/wal.1733743250778, size=2.6 K (2685bytes) 2024-12-09T11:20:59,986 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40493/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684/wal.1733743250778 2024-12-09T11:20:59,987 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40493/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684/wal.1733743250778 after 1ms 2024-12-09T11:21:00,001 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684/wal.1733743250778: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:21:00,001 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:40493/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684/wal.1733743250778 took 15ms 2024-12-09T11:21:00,007 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:40493/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684/wal.1733743250778 so closing down 2024-12-09T11:21:00,007 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-09T11:21:00,009 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000004-wal.1733743250778.temp 2024-12-09T11:21:00,010 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/recovered.edits/0000000000000000004-wal.1733743250778.temp 2024-12-09T11:21:00,011 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-09T11:21:00,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741909_1087 (size=2094) 2024-12-09T11:21:00,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741909_1087 (size=2094) 2024-12-09T11:21:00,027 INFO [split-log-closeStream-pool-0 
{}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/recovered.edits/0000000000000000004-wal.1733743250778.temp (wrote 20 edits, skipped 0 edits in 0 ms) 2024-12-09T11:21:00,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741909_1087 (size=2094) 2024-12-09T11:21:00,040 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/recovered.edits/0000000000000000004-wal.1733743250778.temp to hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/recovered.edits/0000000000000000026 2024-12-09T11:21:00,040 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 23 edits across 1 Regions in 38 ms; skipped=3; WAL=hdfs://localhost:40493/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684/wal.1733743250778, size=2.6 K, length=2685, corrupted=false, cancelled=false 2024-12-09T11:21:00,040 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:40493/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684/wal.1733743250778, journal: Splitting hdfs://localhost:40493/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684/wal.1733743250778, size=2.6 K (2685bytes) at 1733743259986Finishing writing output for hdfs://localhost:40493/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684/wal.1733743250778 so closing down at 1733743260007 (+21 ms)Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/recovered.edits/0000000000000000004-wal.1733743250778.temp at 1733743260010 (+3 ms)3 split writer threads finished at 1733743260011 (+1 ms)Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/recovered.edits/0000000000000000004-wal.1733743250778.temp (wrote 20 edits, skipped 0 edits in 0 ms) at 1733743260027 (+16 ms)Rename recovered edits hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/recovered.edits/0000000000000000004-wal.1733743250778.temp to hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/recovered.edits/0000000000000000026 at 1733743260040 (+13 ms)Processed 23 edits across 1 Regions in 38 ms; skipped=3; WAL=hdfs://localhost:40493/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684/wal.1733743250778, size=2.6 K, length=2685, corrupted=false, cancelled=false at 1733743260040 2024-12-09T11:21:00,043 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:40493/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684/wal.1733743250778 to hdfs://localhost:40493/hbase/oldWALs/wal.1733743250778 2024-12-09T11:21:00,045 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/recovered.edits/0000000000000000026 2024-12-09T11:21:00,045 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 
monitor 2024-12-09T11:21:00,051 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:21:00,073 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684/wal.1733743260052, exclude list is [], retry=0 2024-12-09T11:21:00,076 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:21:00,077 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:21:00,077 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:21:00,080 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684/wal.1733743260052 2024-12-09T11:21:00,080 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39935:39935)] 2024-12-09T11:21:00,080 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 0eef9f7e6d503a3edde04b4549d99997, NAME => 'testReplayEditsAfterAbortingFlush,,1733743250687.0eef9f7e6d503a3edde04b4549d99997.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:21:00,081 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:21:00,081 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733743250687.0eef9f7e6d503a3edde04b4549d99997.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:21:00,081 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:21:00,081 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:21:00,083 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:21:00,084 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0eef9f7e6d503a3edde04b4549d99997 columnFamilyName a 2024-12-09T11:21:00,084 DEBUG [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:00,085 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] regionserver.HStore(327): Store=0eef9f7e6d503a3edde04b4549d99997/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:00,085 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:21:00,086 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0eef9f7e6d503a3edde04b4549d99997 columnFamilyName b 2024-12-09T11:21:00,086 DEBUG [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:00,086 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] regionserver.HStore(327): Store=0eef9f7e6d503a3edde04b4549d99997/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:00,087 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:21:00,087 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0eef9f7e6d503a3edde04b4549d99997 columnFamilyName c 2024-12-09T11:21:00,088 DEBUG [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:00,088 INFO [StoreOpener-0eef9f7e6d503a3edde04b4549d99997-1 {}] regionserver.HStore(327): Store=0eef9f7e6d503a3edde04b4549d99997/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:00,088 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:21:00,089 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:21:00,090 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:21:00,091 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/recovered.edits/0000000000000000026 2024-12-09T11:21:00,095 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/recovered.edits/0000000000000000026: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:21:00,097 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 0, firstSequenceIdInLog=4, maxSequenceIdInLog=26, path=hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/recovered.edits/0000000000000000026 2024-12-09T11:21:00,098 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 0eef9f7e6d503a3edde04b4549d99997 3/3 column families, dataSize=1.16 KB heapSize=3.41 KB 2024-12-09T11:21:00,117 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/.tmp/a/a2ac1d3cdd934f48b742f60d90604889 is 64, key is testReplayEditsAfterAbortingFlush12/a:q/1733743259949/Put/seqid=0 2024-12-09T11:21:00,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741911_1089 (size=5523) 2024-12-09T11:21:00,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741911_1089 (size=5523) 2024-12-09T11:21:00,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741911_1089 (size=5523) 2024-12-09T11:21:00,126 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=416 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/.tmp/a/a2ac1d3cdd934f48b742f60d90604889 2024-12-09T11:21:00,150 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/.tmp/b/52778d5acf4c449ab8453036956b3d79 is 64, key is testReplayEditsAfterAbortingFlush10/b:q/1733743259945/Put/seqid=0 2024-12-09T11:21:00,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741912_1090 (size=5524) 2024-12-09T11:21:00,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741912_1090 (size=5524) 2024-12-09T11:21:00,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741912_1090 (size=5524) 2024-12-09T11:21:00,177 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=417 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/.tmp/b/52778d5acf4c449ab8453036956b3d79 2024-12-09T11:21:00,202 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/.tmp/c/d02de6478b734798a563f8e6ca914fab is 64, key is testReplayEditsAfterAbortingFlush11/c:q/1733743259947/Put/seqid=0 2024-12-09T11:21:00,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741913_1091 (size=5457) 2024-12-09T11:21:00,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741913_1091 (size=5457) 2024-12-09T11:21:00,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741913_1091 (size=5457) 2024-12-09T11:21:00,210 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=357 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/.tmp/c/d02de6478b734798a563f8e6ca914fab 2024-12-09T11:21:00,217 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/.tmp/a/a2ac1d3cdd934f48b742f60d90604889 as hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/a/a2ac1d3cdd934f48b742f60d90604889 2024-12-09T11:21:00,224 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/a/a2ac1d3cdd934f48b742f60d90604889, entries=7, sequenceid=26, filesize=5.4 K 2024-12-09T11:21:00,225 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/.tmp/b/52778d5acf4c449ab8453036956b3d79 as 
hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/b/52778d5acf4c449ab8453036956b3d79 2024-12-09T11:21:00,230 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/b/52778d5acf4c449ab8453036956b3d79, entries=7, sequenceid=26, filesize=5.4 K 2024-12-09T11:21:00,231 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/.tmp/c/d02de6478b734798a563f8e6ca914fab as hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/c/d02de6478b734798a563f8e6ca914fab 2024-12-09T11:21:00,238 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/c/d02de6478b734798a563f8e6ca914fab, entries=6, sequenceid=26, filesize=5.3 K 2024-12-09T11:21:00,238 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for 0eef9f7e6d503a3edde04b4549d99997 in 141ms, sequenceid=26, compaction requested=false; wal=null 2024-12-09T11:21:00,239 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/recovered.edits/0000000000000000026 2024-12-09T11:21:00,241 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:21:00,241 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:21:00,241 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
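[Editor's note] At this point the split-and-replay path is complete: the splitter wrote 20 edits (sequence ids 4 through 26) into recovered.edits/0000000000000000026, the reopened region replayed them, flushed them to store files for families a, b and c, and then deleted the recovered-edits file. The sketch below only illustrates the file-naming convention visible in these records, where the temporary file carries the first sequence id found in the WAL and is renamed on close to the highest sequence id written; the helper names and the 19-digit zero padding are assumptions taken from the names in the log, not the HBase API.

/**
 * Illustrative sketch (not HBase API): recovered-edits naming as seen in the
 * log, e.g. 0000000000000000004-wal.1733743250778.temp renamed to
 * 0000000000000000026 once all edits up to sequence id 26 were written.
 */
public class RecoveredEditsNaming {

  static String tempName(long firstSeqIdInWal, String walName) {
    // zero-padded first sequence id + source WAL name + .temp suffix
    return String.format("%019d-%s.temp", firstSeqIdInWal, walName);
  }

  static String finalName(long maxSeqIdWritten) {
    // renamed to the highest sequence id actually written
    return String.format("%019d", maxSeqIdWritten);
  }

  public static void main(String[] args) {
    System.out.println(tempName(4, "wal.1733743250778"));  // temp file while writing
    System.out.println(finalName(26));                      // final recovered-edits name
    // On reopen the region replays these edits and continues from maxSeqId + 1,
    // matching the "next sequenceid=27" and "26.seqid" records that follow.
  }
}

The sizes also tie out: the three replay flushes wrote 416 B (a) + 417 B (b) + 357 B (c) = 1,190 B, which is exactly the memstore data size reported as unflushed when the region was closed after the aborted flush and the "dataSize ~1.16 KB/1190" reported for the replay flush.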
2024-12-09T11:21:00,243 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:21:00,245 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/testReplayEditsAfterAbortingFlush/0eef9f7e6d503a3edde04b4549d99997/recovered.edits/26.seqid, newMaxSeqId=26, maxSeqId=1 2024-12-09T11:21:00,246 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 0eef9f7e6d503a3edde04b4549d99997; next sequenceid=27; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66895117, jitterRate=-0.003185078501701355}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-09T11:21:00,246 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0eef9f7e6d503a3edde04b4549d99997 2024-12-09T11:21:00,246 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 0eef9f7e6d503a3edde04b4549d99997: Running coprocessor pre-open hook at 1733743260081Writing region info on filesystem at 1733743260081Initializing all the Stores at 1733743260082 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743260082Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743260083 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743260083Obtaining lock to block concurrent updates at 1733743260098 (+15 ms)Preparing flush snapshotting stores in 0eef9f7e6d503a3edde04b4549d99997 at 1733743260098Finished memstore snapshotting testReplayEditsAfterAbortingFlush,,1733743250687.0eef9f7e6d503a3edde04b4549d99997., syncing WAL and waiting on mvcc, flushsize=dataSize=1190, getHeapSize=3440, getOffHeapSize=0, getCellsCount=20 at 1733743260098Flushing stores of testReplayEditsAfterAbortingFlush,,1733743250687.0eef9f7e6d503a3edde04b4549d99997. 
at 1733743260098Flushing 0eef9f7e6d503a3edde04b4549d99997/a: creating writer at 1733743260098Flushing 0eef9f7e6d503a3edde04b4549d99997/a: appending metadata at 1733743260117 (+19 ms)Flushing 0eef9f7e6d503a3edde04b4549d99997/a: closing flushed file at 1733743260117Flushing 0eef9f7e6d503a3edde04b4549d99997/b: creating writer at 1733743260132 (+15 ms)Flushing 0eef9f7e6d503a3edde04b4549d99997/b: appending metadata at 1733743260150 (+18 ms)Flushing 0eef9f7e6d503a3edde04b4549d99997/b: closing flushed file at 1733743260150Flushing 0eef9f7e6d503a3edde04b4549d99997/c: creating writer at 1733743260185 (+35 ms)Flushing 0eef9f7e6d503a3edde04b4549d99997/c: appending metadata at 1733743260201 (+16 ms)Flushing 0eef9f7e6d503a3edde04b4549d99997/c: closing flushed file at 1733743260201Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@77f05585: reopening flushed file at 1733743260216 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4dea69d4: reopening flushed file at 1733743260224 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d9243b9: reopening flushed file at 1733743260230 (+6 ms)Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for 0eef9f7e6d503a3edde04b4549d99997 in 141ms, sequenceid=26, compaction requested=false; wal=null at 1733743260238 (+8 ms)Cleaning up temporary data from old regions at 1733743260241 (+3 ms)Running coprocessor post-open hooks at 1733743260246 (+5 ms)Region opened successfully at 1733743260246 2024-12-09T11:21:00,275 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterAbortingFlush Thread=415 (was 411) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:60400 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741910_1088] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-32692473-172.17.0.3-1733743223895:blk_1073741910_1088, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:37682 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741910_1088] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-22-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:59934 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741910_1088] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-32692473-172.17.0.3-1733743223895:blk_1073741910_1088, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:60372 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-22-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-32692473-172.17.0.3-1733743223895:blk_1073741910_1088, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:37690 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:59954 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-22-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1176 (was 1110) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=419 (was 392) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=352 (was 436) 2024-12-09T11:21:00,275 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1176 is superior to 1024 2024-12-09T11:21:00,297 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testDatalossWhenInputError Thread=415, OpenFileDescriptor=1176, MaxFileDescriptor=1048576, SystemLoadAverage=419, ProcessCount=11, AvailableMemoryMB=351 2024-12-09T11:21:00,297 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1176 is superior to 1024 2024-12-09T11:21:00,315 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:21:00,317 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:21:00,317 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T11:21:00,320 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-04777048, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/hregion-04777048, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:21:00,335 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-04777048/hregion-04777048.1733743260320, exclude list is [], retry=0 2024-12-09T11:21:00,338 DEBUG [AsyncFSWAL-24-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:21:00,339 DEBUG [AsyncFSWAL-24-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:21:00,341 DEBUG [AsyncFSWAL-24-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:21:00,343 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-04777048/hregion-04777048.1733743260320 2024-12-09T11:21:00,343 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39935:39935),(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:43581:43581)] 2024-12-09T11:21:00,343 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 5070524e0a2678338f7e2a07c08187ba, NAME => 'testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testDatalossWhenInputError', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40493/hbase 2024-12-09T11:21:00,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34459 is added to blk_1073741915_1093 (size=61) 2024-12-09T11:21:00,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741915_1093 (size=61) 2024-12-09T11:21:00,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741915_1093 (size=61) 2024-12-09T11:21:00,354 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:21:00,356 INFO [StoreOpener-5070524e0a2678338f7e2a07c08187ba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,357 INFO [StoreOpener-5070524e0a2678338f7e2a07c08187ba-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5070524e0a2678338f7e2a07c08187ba columnFamilyName a 2024-12-09T11:21:00,358 DEBUG [StoreOpener-5070524e0a2678338f7e2a07c08187ba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:00,358 INFO [StoreOpener-5070524e0a2678338f7e2a07c08187ba-1 {}] regionserver.HStore(327): Store=5070524e0a2678338f7e2a07c08187ba/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:00,358 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,360 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,360 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,361 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,361 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,363 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,365 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:21:00,366 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 5070524e0a2678338f7e2a07c08187ba; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61647688, jitterRate=-0.08137786388397217}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:21:00,366 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 5070524e0a2678338f7e2a07c08187ba: Writing region info on filesystem at 1733743260354Initializing all the Stores at 1733743260355 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743260355Cleaning up temporary data from old regions at 1733743260361 (+6 ms)Region opened successfully at 1733743260366 (+5 ms) 2024-12-09T11:21:00,366 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 5070524e0a2678338f7e2a07c08187ba, disabling compactions & flushes 2024-12-09T11:21:00,367 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba. 2024-12-09T11:21:00,367 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba. 2024-12-09T11:21:00,367 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba. after waiting 0 ms 2024-12-09T11:21:00,367 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba. 2024-12-09T11:21:00,367 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba. 
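Note: the WAL created above is an AsyncFSWAL, and the reader lines further down report hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ, which is what TestAsyncWALReplayValueCompression exercises. A minimal sketch of how such a WAL would be requested through plain Configuration keys follows; the key names are quoted from memory of WALFactory/CompressionContext, and the accepted value strings (for example "GZ" versus "gz") should be verified against the HBase version in use rather than taken as authoritative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalValueCompressionConfigSketch {
      public static Configuration walConfig() {
        Configuration conf = HBaseConfiguration.create();
        // Ask WALFactory for the asynchronous fan-out provider (AsyncFSWALProvider), as instantiated above.
        conf.set("hbase.wal.provider", "asyncfs");
        // Dictionary compression of WAL keys/tags (matches hasTagCompression=true in the reader lines).
        conf.setBoolean("hbase.regionserver.wal.enablecompression", true);
        // Compression of WAL cell values; GZ matches valueCompressionType=GZ seen during replay.
        conf.setBoolean("hbase.regionserver.wal.value.compression.enabled", true);
        conf.set("hbase.regionserver.wal.value.compression.type", "GZ");
        return conf;
      }
    }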
2024-12-09T11:21:00,367 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 5070524e0a2678338f7e2a07c08187ba: Waiting for close lock at 1733743260366Disabling compacts and flushes for region at 1733743260366Disabling writes for close at 1733743260367 (+1 ms)Writing region close event to WAL at 1733743260367Closed at 1733743260367 2024-12-09T11:21:00,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741914_1092 (size=95) 2024-12-09T11:21:00,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741914_1092 (size=95) 2024-12-09T11:21:00,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741914_1092 (size=95) 2024-12-09T11:21:00,375 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-09T11:21:00,375 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-04777048:(num 1733743260320) 2024-12-09T11:21:00,375 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-09T11:21:00,377 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:21:00,393 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314/wal.1733743260378, exclude list is [], retry=0 2024-12-09T11:21:00,396 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:21:00,396 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:21:00,397 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:21:00,398 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314/wal.1733743260378 2024-12-09T11:21:00,402 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39935:39935),(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:43581:43581)] 2024-12-09T11:21:00,403 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 5070524e0a2678338f7e2a07c08187ba, NAME => 'testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:21:00,403 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:21:00,403 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,403 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,404 INFO [StoreOpener-5070524e0a2678338f7e2a07c08187ba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,406 INFO [StoreOpener-5070524e0a2678338f7e2a07c08187ba-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5070524e0a2678338f7e2a07c08187ba columnFamilyName a 2024-12-09T11:21:00,406 DEBUG [StoreOpener-5070524e0a2678338f7e2a07c08187ba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:00,407 INFO [StoreOpener-5070524e0a2678338f7e2a07c08187ba-1 {}] regionserver.HStore(327): Store=5070524e0a2678338f7e2a07c08187ba/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:00,407 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,408 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,409 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,410 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,410 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,412 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,413 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 5070524e0a2678338f7e2a07c08187ba; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66639718, jitterRate=-0.0069908201694488525}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:21:00,414 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 
5070524e0a2678338f7e2a07c08187ba: Writing region info on filesystem at 1733743260403Initializing all the Stores at 1733743260404 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743260404Cleaning up temporary data from old regions at 1733743260410 (+6 ms)Region opened successfully at 1733743260414 (+4 ms) 2024-12-09T11:21:00,425 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 5070524e0a2678338f7e2a07c08187ba, disabling compactions & flushes 2024-12-09T11:21:00,425 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba. 2024-12-09T11:21:00,425 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba. 2024-12-09T11:21:00,425 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba. after waiting 0 ms 2024-12-09T11:21:00,425 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba. 2024-12-09T11:21:00,427 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 750 in region testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba. 2024-12-09T11:21:00,427 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba. 
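Note: the lines that follow record a WAL split and replay: the split writer creates recovered.edits/0000000000000000003-wal.1733743260378.temp, renames it to 0000000000000000012 once it knows the highest sequence id it wrote (edits 3..12), and the reopened region replays it because 12 is greater than the last flushed sequence id (maxSeqId=1), then flushes and writes 12.seqid. The standalone sketch below only mirrors the 19-digit, zero-padded naming convention visible in those paths; the real logic lives in WALSplitUtil and HRegion (replayRecoveredEditsIfAny), so treat this as an illustration, not the HBase implementation.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class RecoveredEditsNameSketch {
      // recovered.edits files carry a 19-digit, zero-padded sequence id, optionally
      // followed by "-<wal file name>.temp" while the split writer is still open.
      private static final Pattern NAME = Pattern.compile("^(\\d{19})(?:-.+\\.temp)?$");

      // Extract the sequence id encoded in a recovered.edits file name.
      static long sequenceIdOf(String fileName) {
        Matcher m = NAME.matcher(fileName);
        if (!m.matches()) {
          throw new IllegalArgumentException("not a recovered.edits name: " + fileName);
        }
        return Long.parseLong(m.group(1));
      }

      // A file is worth replaying only if it can hold edits newer than what is already flushed.
      static boolean needsReplay(String fileName, long maxFlushedSeqId) {
        return sequenceIdOf(fileName) > maxFlushedSeqId;
      }

      public static void main(String[] args) {
        // Values taken from the split/replay lines below: the renamed file holds edits up to 12,
        // and the region's previous max sequence id was 1, so it is replayed.
        System.out.println(sequenceIdOf("0000000000000000012"));   // 12
        System.out.println(needsReplay("0000000000000000012", 1)); // true
      }
    }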
2024-12-09T11:21:00,427 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 5070524e0a2678338f7e2a07c08187ba: Waiting for close lock at 1733743260425Disabling compacts and flushes for region at 1733743260425Disabling writes for close at 1733743260425Writing region close event to WAL at 1733743260427 (+2 ms)Closed at 1733743260427 2024-12-09T11:21:00,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741916_1094 (size=1050) 2024-12-09T11:21:00,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741916_1094 (size=1050) 2024-12-09T11:21:00,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741916_1094 (size=1050) 2024-12-09T11:21:00,461 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:40493/hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314/wal.1733743260378, size=1.0 K (1050bytes) 2024-12-09T11:21:00,461 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40493/hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314/wal.1733743260378 2024-12-09T11:21:00,462 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40493/hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314/wal.1733743260378 after 1ms 2024-12-09T11:21:00,465 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314/wal.1733743260378: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:21:00,465 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:40493/hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314/wal.1733743260378 took 4ms 2024-12-09T11:21:00,468 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:40493/hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314/wal.1733743260378 so closing down 2024-12-09T11:21:00,468 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-09T11:21:00,475 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733743260378.temp 2024-12-09T11:21:00,478 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/recovered.edits/0000000000000000003-wal.1733743260378.temp 2024-12-09T11:21:00,479 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-09T11:21:00,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741917_1095 (size=1050) 2024-12-09T11:21:00,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741917_1095 (size=1050) 2024-12-09T11:21:00,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741917_1095 (size=1050) 2024-12-09T11:21:00,490 INFO 
[split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/recovered.edits/0000000000000000003-wal.1733743260378.temp (wrote 10 edits, skipped 0 edits in 0 ms) 2024-12-09T11:21:00,491 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/recovered.edits/0000000000000000003-wal.1733743260378.temp to hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/recovered.edits/0000000000000000012 2024-12-09T11:21:00,492 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 10 edits across 1 Regions in 26 ms; skipped=0; WAL=hdfs://localhost:40493/hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314/wal.1733743260378, size=1.0 K, length=1050, corrupted=false, cancelled=false 2024-12-09T11:21:00,492 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:40493/hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314/wal.1733743260378, journal: Splitting hdfs://localhost:40493/hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314/wal.1733743260378, size=1.0 K (1050bytes) at 1733743260461Finishing writing output for hdfs://localhost:40493/hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314/wal.1733743260378 so closing down at 1733743260468 (+7 ms)Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/recovered.edits/0000000000000000003-wal.1733743260378.temp at 1733743260478 (+10 ms)3 split writer threads finished at 1733743260479 (+1 ms)Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/recovered.edits/0000000000000000003-wal.1733743260378.temp (wrote 10 edits, skipped 0 edits in 0 ms) at 1733743260490 (+11 ms)Rename recovered edits hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/recovered.edits/0000000000000000003-wal.1733743260378.temp to hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/recovered.edits/0000000000000000012 at 1733743260492 (+2 ms)Processed 10 edits across 1 Regions in 26 ms; skipped=0; WAL=hdfs://localhost:40493/hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314/wal.1733743260378, size=1.0 K, length=1050, corrupted=false, cancelled=false at 1733743260492 2024-12-09T11:21:00,494 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:40493/hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314/wal.1733743260378 to hdfs://localhost:40493/hbase/oldWALs/wal.1733743260378 2024-12-09T11:21:00,494 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/recovered.edits/0000000000000000012 2024-12-09T11:21:00,499 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/recovered.edits/0000000000000000012: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, 
valueCompressionType=GZ 2024-12-09T11:21:00,849 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-09T11:21:00,851 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:21:00,866 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314/wal.1733743260851, exclude list is [], retry=0 2024-12-09T11:21:00,869 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:21:00,869 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:21:00,870 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:21:00,872 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314/wal.1733743260851 2024-12-09T11:21:00,874 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:39935:39935),(127.0.0.1/127.0.0.1:43581:43581)] 2024-12-09T11:21:00,875 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 5070524e0a2678338f7e2a07c08187ba, NAME => 'testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:21:00,875 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:21:00,875 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,875 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,883 INFO [StoreOpener-5070524e0a2678338f7e2a07c08187ba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,887 INFO [StoreOpener-5070524e0a2678338f7e2a07c08187ba-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5070524e0a2678338f7e2a07c08187ba columnFamilyName a 2024-12-09T11:21:00,887 DEBUG [StoreOpener-5070524e0a2678338f7e2a07c08187ba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:00,888 INFO [StoreOpener-5070524e0a2678338f7e2a07c08187ba-1 {}] regionserver.HStore(327): Store=5070524e0a2678338f7e2a07c08187ba/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:00,888 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,889 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,891 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,892 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/recovered.edits/0000000000000000012 2024-12-09T11:21:00,894 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/recovered.edits/0000000000000000012: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:21:00,895 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 0, firstSequenceIdInLog=3, maxSequenceIdInLog=12, path=hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/recovered.edits/0000000000000000012 2024-12-09T11:21:00,895 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 5070524e0a2678338f7e2a07c08187ba 1/1 column families, dataSize=750 B heapSize=1.73 KB 2024-12-09T11:21:00,918 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/.tmp/a/73d0133a1b7748059b3ccd931d76bcb3 is 79, key is testDatalossWhenInputError/a:x0/1733743260414/Put/seqid=0 2024-12-09T11:21:00,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741919_1097 (size=5808) 2024-12-09T11:21:00,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741919_1097 (size=5808) 2024-12-09T11:21:00,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741919_1097 (size=5808) 2024-12-09T11:21:00,927 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=750 B at 
sequenceid=12 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/.tmp/a/73d0133a1b7748059b3ccd931d76bcb3 2024-12-09T11:21:00,938 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/.tmp/a/73d0133a1b7748059b3ccd931d76bcb3 as hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/a/73d0133a1b7748059b3ccd931d76bcb3 2024-12-09T11:21:00,945 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/a/73d0133a1b7748059b3ccd931d76bcb3, entries=10, sequenceid=12, filesize=5.7 K 2024-12-09T11:21:00,946 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 5070524e0a2678338f7e2a07c08187ba in 50ms, sequenceid=12, compaction requested=false; wal=null 2024-12-09T11:21:00,946 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/recovered.edits/0000000000000000012 2024-12-09T11:21:00,947 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,947 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,950 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,952 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=1 2024-12-09T11:21:00,953 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 5070524e0a2678338f7e2a07c08187ba; next sequenceid=13; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70553219, jitterRate=0.051324889063835144}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:21:00,954 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 5070524e0a2678338f7e2a07c08187ba: Writing region info on filesystem at 1733743260875Initializing all the Stores at 1733743260878 (+3 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743260879 (+1 ms)Obtaining lock to block concurrent updates at 1733743260895 (+16 ms)Preparing flush snapshotting stores in 5070524e0a2678338f7e2a07c08187ba at 1733743260895Finished memstore snapshotting testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba., syncing WAL and waiting on mvcc, flushsize=dataSize=750, getHeapSize=1760, getOffHeapSize=0, getCellsCount=10 at 1733743260895Flushing stores of testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba. 
at 1733743260895Flushing 5070524e0a2678338f7e2a07c08187ba/a: creating writer at 1733743260895Flushing 5070524e0a2678338f7e2a07c08187ba/a: appending metadata at 1733743260917 (+22 ms)Flushing 5070524e0a2678338f7e2a07c08187ba/a: closing flushed file at 1733743260918 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10317585: reopening flushed file at 1733743260936 (+18 ms)Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 5070524e0a2678338f7e2a07c08187ba in 50ms, sequenceid=12, compaction requested=false; wal=null at 1733743260946 (+10 ms)Cleaning up temporary data from old regions at 1733743260947 (+1 ms)Region opened successfully at 1733743260954 (+7 ms) 2024-12-09T11:21:00,957 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 5070524e0a2678338f7e2a07c08187ba, NAME => 'testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:21:00,957 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733743260316.5070524e0a2678338f7e2a07c08187ba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:21:00,957 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,957 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,959 INFO [StoreOpener-5070524e0a2678338f7e2a07c08187ba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,959 INFO [StoreOpener-5070524e0a2678338f7e2a07c08187ba-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5070524e0a2678338f7e2a07c08187ba columnFamilyName a 2024-12-09T11:21:00,959 DEBUG [StoreOpener-5070524e0a2678338f7e2a07c08187ba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:00,965 DEBUG [StoreOpener-5070524e0a2678338f7e2a07c08187ba-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/a/73d0133a1b7748059b3ccd931d76bcb3 2024-12-09T11:21:00,965 INFO [StoreOpener-5070524e0a2678338f7e2a07c08187ba-1 {}] regionserver.HStore(327): Store=5070524e0a2678338f7e2a07c08187ba/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:00,965 DEBUG [Time-limited test {}] 
regionserver.HRegion(1038): replaying wal for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,966 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,967 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,967 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,968 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,969 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 5070524e0a2678338f7e2a07c08187ba 2024-12-09T11:21:00,971 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/testDatalossWhenInputError/5070524e0a2678338f7e2a07c08187ba/recovered.edits/13.seqid, newMaxSeqId=13, maxSeqId=12 2024-12-09T11:21:00,972 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 5070524e0a2678338f7e2a07c08187ba; next sequenceid=14; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59344630, jitterRate=-0.11569610238075256}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:21:00,972 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 5070524e0a2678338f7e2a07c08187ba: Writing region info on filesystem at 1733743260957Initializing all the Stores at 1733743260958 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743260958Cleaning up temporary data from old regions at 1733743260968 (+10 ms)Region opened successfully at 1733743260972 (+4 ms) 2024-12-09T11:21:00,993 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testDatalossWhenInputError Thread=429 (was 415) Potentially hanging thread: PacketResponder: BP-32692473-172.17.0.3-1733743223895:blk_1073741918_1096, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:60014 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741918_1096] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:60372 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:37690 [Waiting for operation #16] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-32692473-172.17.0.3-1733743223895:blk_1073741918_1096, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:60476 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741918_1096] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:37766 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741918_1096] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-32692473-172.17.0.3-1733743223895:blk_1073741918_1096, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:59954 [Waiting for operation #15] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1262 (was 1176) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=419 (was 419), ProcessCount=11 (was 11), AvailableMemoryMB=340 (was 351) 2024-12-09T11:21:00,994 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1262 is superior to 1024 2024-12-09T11:21:01,009 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testCompactedBulkLoadedFiles Thread=429, OpenFileDescriptor=1262, MaxFileDescriptor=1048576, SystemLoadAverage=419, ProcessCount=11, AvailableMemoryMB=339 2024-12-09T11:21:01,009 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1262 is superior to 1024 2024-12-09T11:21:01,027 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:21:01,029 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:21:01,030 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T11:21:01,033 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-76482393, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/hregion-76482393, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:21:01,050 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-76482393/hregion-76482393.1733743261033, exclude list is [], retry=0 2024-12-09T11:21:01,053 DEBUG [AsyncFSWAL-26-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:21:01,053 DEBUG [AsyncFSWAL-26-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:21:01,053 DEBUG [AsyncFSWAL-26-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:21:01,055 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-76482393/hregion-76482393.1733743261033 2024-12-09T11:21:01,058 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:39935:39935)] 2024-12-09T11:21:01,058 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 760eb882473db22ba02db606a1b2b7f9, NAME => 'testCompactedBulkLoadedFiles,,1733743261027.760eb882473db22ba02db606a1b2b7f9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testCompactedBulkLoadedFiles', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40493/hbase 2024-12-09T11:21:01,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741921_1099 (size=63) 2024-12-09T11:21:01,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741921_1099 (size=63) 2024-12-09T11:21:01,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741921_1099 (size=63) 2024-12-09T11:21:01,476 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733743261027.760eb882473db22ba02db606a1b2b7f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:21:01,477 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:01,480 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 760eb882473db22ba02db606a1b2b7f9 columnFamilyName a 2024-12-09T11:21:01,480 DEBUG [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:01,481 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.HStore(327): Store=760eb882473db22ba02db606a1b2b7f9/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:01,482 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:01,484 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 760eb882473db22ba02db606a1b2b7f9 columnFamilyName b 2024-12-09T11:21:01,484 DEBUG [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:01,485 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.HStore(327): Store=760eb882473db22ba02db606a1b2b7f9/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:01,485 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:01,487 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 760eb882473db22ba02db606a1b2b7f9 columnFamilyName c 2024-12-09T11:21:01,487 DEBUG [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:01,488 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.HStore(327): Store=760eb882473db22ba02db606a1b2b7f9/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:01,488 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:01,489 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:01,489 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:01,495 DEBUG [Time-limited 
test {}] regionserver.HRegion(1048): stopping wal replay for 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:01,495 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:01,496 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-09T11:21:01,503 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:01,506 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:21:01,506 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 760eb882473db22ba02db606a1b2b7f9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68957493, jitterRate=0.0275467187166214}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-09T11:21:01,507 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 760eb882473db22ba02db606a1b2b7f9: Writing region info on filesystem at 1733743261476Initializing all the Stores at 1733743261477 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743261477Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743261477Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743261477Cleaning up temporary data from old regions at 1733743261495 (+18 ms)Region opened successfully at 1733743261507 (+12 ms) 2024-12-09T11:21:01,507 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 760eb882473db22ba02db606a1b2b7f9, disabling compactions & flushes 2024-12-09T11:21:01,507 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1733743261027.760eb882473db22ba02db606a1b2b7f9. 2024-12-09T11:21:01,507 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1733743261027.760eb882473db22ba02db606a1b2b7f9. 2024-12-09T11:21:01,507 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1733743261027.760eb882473db22ba02db606a1b2b7f9. 
after waiting 0 ms 2024-12-09T11:21:01,507 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1733743261027.760eb882473db22ba02db606a1b2b7f9. 2024-12-09T11:21:01,508 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1733743261027.760eb882473db22ba02db606a1b2b7f9. 2024-12-09T11:21:01,508 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 760eb882473db22ba02db606a1b2b7f9: Waiting for close lock at 1733743261507Disabling compacts and flushes for region at 1733743261507Disabling writes for close at 1733743261507Writing region close event to WAL at 1733743261508 (+1 ms)Closed at 1733743261508 2024-12-09T11:21:01,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741920_1098 (size=95) 2024-12-09T11:21:01,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741920_1098 (size=95) 2024-12-09T11:21:01,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741920_1098 (size=95) 2024-12-09T11:21:01,523 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-09T11:21:01,523 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-76482393:(num 1733743261033) 2024-12-09T11:21:01,523 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-09T11:21:01,526 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:21:01,541 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743261526, exclude list is [], retry=0 2024-12-09T11:21:01,544 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:21:01,545 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:21:01,545 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:21:01,548 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743261526 2024-12-09T11:21:01,549 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39935:39935),(127.0.0.1/127.0.0.1:39985:39985)] 2024-12-09T11:21:01,549 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: 
{ENCODED => 760eb882473db22ba02db606a1b2b7f9, NAME => 'testCompactedBulkLoadedFiles,,1733743261027.760eb882473db22ba02db606a1b2b7f9.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:21:01,549 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733743261027.760eb882473db22ba02db606a1b2b7f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:21:01,549 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:01,549 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:01,552 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:01,553 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 760eb882473db22ba02db606a1b2b7f9 columnFamilyName a 2024-12-09T11:21:01,553 DEBUG [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:01,554 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.HStore(327): Store=760eb882473db22ba02db606a1b2b7f9/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:01,554 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:01,555 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
760eb882473db22ba02db606a1b2b7f9 columnFamilyName b 2024-12-09T11:21:01,555 DEBUG [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:01,555 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.HStore(327): Store=760eb882473db22ba02db606a1b2b7f9/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:01,555 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:01,556 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 760eb882473db22ba02db606a1b2b7f9 columnFamilyName c 2024-12-09T11:21:01,556 DEBUG [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:01,557 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.HStore(327): Store=760eb882473db22ba02db606a1b2b7f9/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:01,557 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:01,558 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:01,559 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:01,560 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:01,560 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:01,561 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
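[editor's note] The FlushLargeStoresPolicy entries just above report a per-family flush lower bound of "42.7 M" because hbase.hregion.percolumnfamilyflush.size.lower.bound is unset, so the region falls back to the memstore flush size divided by the number of column families. A minimal arithmetic sketch of that fallback, using the 128 MB flush size and the three families (a, b, c) implied by the numbers in this log; the class and variable names are illustrative only, not the actual HBase fields:

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024; // 134217728 bytes
        int columnFamilies = 3;                      // families a, b, c
        long lowerBound = memstoreFlushSize / columnFamilies;
        // Prints 44739242, matching flushSizeLowerBound=44739242 (~42.7 MB) in the log above.
        System.out.println(lowerBound);
      }
    }
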
2024-12-09T11:21:01,563 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:01,564 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 760eb882473db22ba02db606a1b2b7f9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72705630, jitterRate=0.08339831233024597}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-09T11:21:01,565 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 760eb882473db22ba02db606a1b2b7f9: Writing region info on filesystem at 1733743261550Initializing all the Stores at 1733743261551 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743261551Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743261552 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743261552Cleaning up temporary data from old regions at 1733743261560 (+8 ms)Region opened successfully at 1733743261564 (+4 ms) 2024-12-09T11:21:01,569 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/testCompactedBulkLoadedFiles/hfile0 is 32, key is 000/a:a/1733743261569/Put/seqid=0 2024-12-09T11:21:01,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741923_1101 (size=4875) 2024-12-09T11:21:01,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741923_1101 (size=4875) 2024-12-09T11:21:01,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741923_1101 (size=4875) 2024-12-09T11:21:01,579 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/testCompactedBulkLoadedFiles/hfile1 is 32, key is 100/a:a/1733743261579/Put/seqid=0 2024-12-09T11:21:01,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741924_1102 (size=4875) 2024-12-09T11:21:01,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741924_1102 (size=4875) 2024-12-09T11:21:01,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741924_1102 (size=4875) 2024-12-09T11:21:01,610 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of 
the biggest cell in hdfs://localhost:40493/hbase/testCompactedBulkLoadedFiles/hfile2 is 32, key is 200/a:a/1733743261610/Put/seqid=0 2024-12-09T11:21:01,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741925_1103 (size=4875) 2024-12-09T11:21:01,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741925_1103 (size=4875) 2024-12-09T11:21:01,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741925_1103 (size=4875) 2024-12-09T11:21:01,629 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:40493/hbase/testCompactedBulkLoadedFiles/hfile0 for inclusion in 760eb882473db22ba02db606a1b2b7f9/a 2024-12-09T11:21:01,634 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=000 last=050 2024-12-09T11:21:01,634 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-09T11:21:01,634 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:40493/hbase/testCompactedBulkLoadedFiles/hfile1 for inclusion in 760eb882473db22ba02db606a1b2b7f9/a 2024-12-09T11:21:01,642 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=100 last=150 2024-12-09T11:21:01,642 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-09T11:21:01,642 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:40493/hbase/testCompactedBulkLoadedFiles/hfile2 for inclusion in 760eb882473db22ba02db606a1b2b7f9/a 2024-12-09T11:21:01,646 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=200 last=250 2024-12-09T11:21:01,646 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-09T11:21:01,646 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 760eb882473db22ba02db606a1b2b7f9 3/3 column families, dataSize=51 B heapSize=896 B 2024-12-09T11:21:01,666 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/.tmp/a/529d8d387b5e42aa96de839ba057fb68 is 55, key is testCompactedBulkLoadedFiles/a:a/1733743261565/Put/seqid=0 2024-12-09T11:21:01,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741926_1104 (size=5107) 2024-12-09T11:21:01,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741926_1104 (size=5107) 2024-12-09T11:21:01,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741926_1104 (size=5107) 2024-12-09T11:21:02,087 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51 B at sequenceid=4 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/.tmp/a/529d8d387b5e42aa96de839ba057fb68 2024-12-09T11:21:02,094 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/.tmp/a/529d8d387b5e42aa96de839ba057fb68 as 
hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/529d8d387b5e42aa96de839ba057fb68 2024-12-09T11:21:02,100 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/529d8d387b5e42aa96de839ba057fb68, entries=1, sequenceid=4, filesize=5.0 K 2024-12-09T11:21:02,102 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~51 B/51, heapSize ~368 B/368, currentSize=0 B/0 for 760eb882473db22ba02db606a1b2b7f9 in 456ms, sequenceid=4, compaction requested=false 2024-12-09T11:21:02,102 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 760eb882473db22ba02db606a1b2b7f9: 2024-12-09T11:21:02,103 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/testCompactedBulkLoadedFiles/hfile0 as hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/508c27680f14476980408574c96d5794_SeqId_4_ 2024-12-09T11:21:02,104 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/testCompactedBulkLoadedFiles/hfile1 as hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/722d13e656424d9b84f38e6c308249a9_SeqId_4_ 2024-12-09T11:21:02,106 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/testCompactedBulkLoadedFiles/hfile2 as hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/21d4d7a3d6024b7fb35fe6be60ed0036_SeqId_4_ 2024-12-09T11:21:02,106 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:40493/hbase/testCompactedBulkLoadedFiles/hfile0 into 760eb882473db22ba02db606a1b2b7f9/a as hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/508c27680f14476980408574c96d5794_SeqId_4_ - updating store file list. 2024-12-09T11:21:02,117 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 508c27680f14476980408574c96d5794_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-09T11:21:02,117 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/508c27680f14476980408574c96d5794_SeqId_4_ into 760eb882473db22ba02db606a1b2b7f9/a 2024-12-09T11:21:02,117 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:40493/hbase/testCompactedBulkLoadedFiles/hfile0 into 760eb882473db22ba02db606a1b2b7f9/a (new location: hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/508c27680f14476980408574c96d5794_SeqId_4_) 2024-12-09T11:21:02,118 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:40493/hbase/testCompactedBulkLoadedFiles/hfile1 into 760eb882473db22ba02db606a1b2b7f9/a as hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/722d13e656424d9b84f38e6c308249a9_SeqId_4_ - updating store file list. 
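[editor's note] The entries above show the test staging three standalone HFiles (hfile0, hfile1, hfile2 with row keys 000/100/200 in family a:a) and then loading them into store 'a'. A minimal, hypothetical sketch of producing one such file with the HFile writer API is below; the path and cell values are copied from the log for illustration, and the exact builder methods can differ between HBase versions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.io.hfile.HFileContext;
    import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class HFileWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        // Path mirrors hfile0 from the log; treat it as a placeholder.
        Path hfile = new Path("/hbase/testCompactedBulkLoadedFiles/hfile0");
        HFileContext context = new HFileContextBuilder().withBlockSize(64 * 1024).build();
        try (HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
            .withPath(fs, hfile)
            .withFileContext(context)
            .create()) {
          // One small cell: row "000", family "a", qualifier "a".
          writer.append(new KeyValue(Bytes.toBytes("000"), Bytes.toBytes("a"),
              Bytes.toBytes("a"), Bytes.toBytes("v")));
        }
      }
    }
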
2024-12-09T11:21:02,124 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 722d13e656424d9b84f38e6c308249a9_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-09T11:21:02,124 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/722d13e656424d9b84f38e6c308249a9_SeqId_4_ into 760eb882473db22ba02db606a1b2b7f9/a 2024-12-09T11:21:02,124 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:40493/hbase/testCompactedBulkLoadedFiles/hfile1 into 760eb882473db22ba02db606a1b2b7f9/a (new location: hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/722d13e656424d9b84f38e6c308249a9_SeqId_4_) 2024-12-09T11:21:02,125 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:40493/hbase/testCompactedBulkLoadedFiles/hfile2 into 760eb882473db22ba02db606a1b2b7f9/a as hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/21d4d7a3d6024b7fb35fe6be60ed0036_SeqId_4_ - updating store file list. 2024-12-09T11:21:02,131 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 21d4d7a3d6024b7fb35fe6be60ed0036_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-09T11:21:02,131 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/21d4d7a3d6024b7fb35fe6be60ed0036_SeqId_4_ into 760eb882473db22ba02db606a1b2b7f9/a 2024-12-09T11:21:02,131 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:40493/hbase/testCompactedBulkLoadedFiles/hfile2 into 760eb882473db22ba02db606a1b2b7f9/a (new location: hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/21d4d7a3d6024b7fb35fe6be60ed0036_SeqId_4_) 2024-12-09T11:21:02,140 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T11:21:02,140 DEBUG [Time-limited test {}] regionserver.HStore(1541): 760eb882473db22ba02db606a1b2b7f9/a is initiating major compaction (all files) 2024-12-09T11:21:02,140 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 760eb882473db22ba02db606a1b2b7f9/a in testCompactedBulkLoadedFiles,,1733743261027.760eb882473db22ba02db606a1b2b7f9. 
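[editor's note] The validation and load messages above correspond to bulk loading the staged HFiles directly through the region. A hedged sketch of that step is shown below, assuming the test-facing HRegion#bulkLoadHFiles form that takes family/path pairs, an assignSeqId flag, and an optional listener; the signature varies between HBase versions, so treat this as an approximation rather than the test's exact code:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.Pair;

    public class BulkLoadSketch {
      // Bulk loads the three staged HFiles into column family 'a' of an already
      // opened region. assignSeqId=true so each file is renamed with the _SeqId_N_
      // suffix visible in the log; the listener argument is left null.
      static void bulkLoadIntoA(HRegion region) throws IOException {
        byte[] family = Bytes.toBytes("a");
        List<Pair<byte[], String>> familyPaths = new ArrayList<>();
        familyPaths.add(new Pair<>(family, "hdfs://localhost:40493/hbase/testCompactedBulkLoadedFiles/hfile0"));
        familyPaths.add(new Pair<>(family, "hdfs://localhost:40493/hbase/testCompactedBulkLoadedFiles/hfile1"));
        familyPaths.add(new Pair<>(family, "hdfs://localhost:40493/hbase/testCompactedBulkLoadedFiles/hfile2"));
        region.bulkLoadHFiles(familyPaths, true, null);
      }
    }
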
2024-12-09T11:21:02,141 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/529d8d387b5e42aa96de839ba057fb68, hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/508c27680f14476980408574c96d5794_SeqId_4_, hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/722d13e656424d9b84f38e6c308249a9_SeqId_4_, hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/21d4d7a3d6024b7fb35fe6be60ed0036_SeqId_4_] into tmpdir=hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/.tmp, totalSize=19.3 K 2024-12-09T11:21:02,141 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 529d8d387b5e42aa96de839ba057fb68, keycount=1, bloomtype=ROW, size=5.0 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=1733743261565 2024-12-09T11:21:02,142 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 508c27680f14476980408574c96d5794_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-09T11:21:02,142 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 722d13e656424d9b84f38e6c308249a9_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-09T11:21:02,143 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 21d4d7a3d6024b7fb35fe6be60ed0036_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-09T11:21:02,160 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/.tmp/a/21a6548d35b44d47bc478af81bc05096 is 55, key is testCompactedBulkLoadedFiles/a:a/1733743261565/Put/seqid=0 2024-12-09T11:21:02,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741927_1105 (size=6154) 2024-12-09T11:21:02,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741927_1105 (size=6154) 2024-12-09T11:21:02,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741927_1105 (size=6154) 2024-12-09T11:21:02,219 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/.tmp/a/21a6548d35b44d47bc478af81bc05096 as hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/21a6548d35b44d47bc478af81bc05096 2024-12-09T11:21:02,239 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 4 (all) file(s) in 760eb882473db22ba02db606a1b2b7f9/a of 760eb882473db22ba02db606a1b2b7f9 into 21a6548d35b44d47bc478af81bc05096(size=6.0 K), total size for store is 6.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
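[editor's note] The compaction entries above select all four store files (the flush output plus the three bulk loaded files) and rewrite them into a single ~6.0 K file. A minimal sketch of forcing that from test code follows, assuming the HRegion#compact(boolean) convenience commonly used in unit tests; the method name below is illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MajorCompactSketch {
      // Requests a major compaction of every store in the region, as triggered
      // above, then reads back the live store file count for family 'a'.
      static int majorCompactFamilyA(HRegion region) throws IOException {
        region.compact(true); // true = major compaction (all files)
        return region.getStore(Bytes.toBytes("a")).getStorefilesCount();
      }
    }

After the compaction the four input files remain on disk as "compacted" store files until they are archived, which is what the replay later in this log has to cope with.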
2024-12-09T11:21:02,239 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 760eb882473db22ba02db606a1b2b7f9: 2024-12-09T11:21:02,239 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking 2024-12-09T11:21:02,239 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking 2024-12-09T11:21:02,296 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:40493/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743261526, size=0 (0bytes) 2024-12-09T11:21:02,296 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:40493/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743261526 might be still open, length is 0 2024-12-09T11:21:02,297 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40493/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743261526 2024-12-09T11:21:02,297 WARN [IPC Server handler 4 on default port 40493 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743261526 has not been closed. Lease recovery is in progress. RecoveryId = 1106 for block blk_1073741922_1100 2024-12-09T11:21:02,297 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40493/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743261526 after 0ms 2024-12-09T11:21:05,186 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:60512 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741922_1100] {}] datanode.DataXceiver(331): 127.0.0.1:44093:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60512 dst: /127.0.0.1:44093 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:44093 remote=/127.0.0.1:60512]. Total timeout mills is 60000, 57043 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:21:05,187 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:37798 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741922_1100] {}] datanode.DataXceiver(331): 127.0.0.1:34459:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37798 dst: /127.0.0.1:34459 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:21:05,187 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:60058 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741922_1100] {}] datanode.DataXceiver(331): 127.0.0.1:46359:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60058 dst: /127.0.0.1:46359 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:21:05,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741922_1106 (size=1174) 2024-12-09T11:21:05,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741922_1106 (size=1174) 2024-12-09T11:21:05,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741922_1106 (size=1174) 2024-12-09T11:21:05,464 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-09T11:21:06,298 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40493/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743261526 after 4001ms 2024-12-09T11:21:06,301 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743261526: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:21:06,301 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:40493/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743261526 took 4005ms 2024-12-09T11:21:06,303 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:40493/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743261526; continuing. 
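[editor's note] The WARN/INFO lines above show the splitter finding the WAL still open (length 0), recovering its HDFS lease (about 4 s here), and only then reading it. A hedged sketch of that lease-recovery step is below, assuming the RecoverLeaseFSUtils helper named in the log; the exact overload (with or without a progress reporter) may differ by HBase version:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.util.RecoverLeaseFSUtils;

    public class WalLeaseRecoverySketch {
      // Asks the NameNode to recover the lease on a WAL that may still be open,
      // retrying until the file is closed and its length is finalized.
      static void recoverWalLease(Configuration conf, String walUri) throws IOException {
        Path wal = new Path(walUri);
        FileSystem fs = wal.getFileSystem(conf);
        RecoverLeaseFSUtils.recoverFileLease(fs, wal, conf, null); // null = no progress reporter
      }
    }
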
2024-12-09T11:21:06,303 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:40493/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743261526 so closing down 2024-12-09T11:21:06,303 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-09T11:21:06,305 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733743261526.temp 2024-12-09T11:21:06,306 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/recovered.edits/0000000000000000003-wal.1733743261526.temp 2024-12-09T11:21:06,307 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-09T11:21:06,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741928_1107 (size=548) 2024-12-09T11:21:06,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741928_1107 (size=548) 2024-12-09T11:21:06,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741928_1107 (size=548) 2024-12-09T11:21:06,316 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/recovered.edits/0000000000000000003-wal.1733743261526.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-09T11:21:06,317 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/recovered.edits/0000000000000000003-wal.1733743261526.temp to hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/recovered.edits/0000000000000000008 2024-12-09T11:21:06,318 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 5 edits across 1 Regions in 15 ms; skipped=3; WAL=hdfs://localhost:40493/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743261526, size=0, length=0, corrupted=false, cancelled=false 2024-12-09T11:21:06,318 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:40493/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743261526, journal: Splitting hdfs://localhost:40493/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743261526, size=0 (0bytes) at 1733743262296Finishing writing output for hdfs://localhost:40493/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743261526 so closing down at 1733743266303 (+4007 ms)Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/recovered.edits/0000000000000000003-wal.1733743261526.temp at 1733743266306 (+3 ms)3 split writer threads finished at 1733743266307 (+1 ms)Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/recovered.edits/0000000000000000003-wal.1733743261526.temp (wrote 2 edits, 
skipped 0 edits in 0 ms) at 1733743266316 (+9 ms)Rename recovered edits hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/recovered.edits/0000000000000000003-wal.1733743261526.temp to hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/recovered.edits/0000000000000000008 at 1733743266317 (+1 ms)Processed 5 edits across 1 Regions in 15 ms; skipped=3; WAL=hdfs://localhost:40493/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743261526, size=0, length=0, corrupted=false, cancelled=false at 1733743266318 (+1 ms) 2024-12-09T11:21:06,319 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:40493/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743261526 to hdfs://localhost:40493/hbase/oldWALs/wal.1733743261526 2024-12-09T11:21:06,320 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/recovered.edits/0000000000000000008 2024-12-09T11:21:06,320 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-09T11:21:06,322 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:21:06,343 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743266323, exclude list is [], retry=0 2024-12-09T11:21:06,345 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:21:06,346 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:21:06,346 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:21:06,348 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743266323 2024-12-09T11:21:06,348 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:39935:39935)] 2024-12-09T11:21:06,348 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 760eb882473db22ba02db606a1b2b7f9, NAME => 'testCompactedBulkLoadedFiles,,1733743261027.760eb882473db22ba02db606a1b2b7f9.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:21:06,348 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733743261027.760eb882473db22ba02db606a1b2b7f9.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:21:06,348 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:06,348 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:06,350 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:06,350 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 760eb882473db22ba02db606a1b2b7f9 columnFamilyName a 2024-12-09T11:21:06,351 DEBUG [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:06,356 DEBUG [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/21a6548d35b44d47bc478af81bc05096 2024-12-09T11:21:06,359 DEBUG [StoreFileOpener-760eb882473db22ba02db606a1b2b7f9-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 21d4d7a3d6024b7fb35fe6be60ed0036_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-09T11:21:06,359 DEBUG [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/21d4d7a3d6024b7fb35fe6be60ed0036_SeqId_4_ 2024-12-09T11:21:06,363 DEBUG [StoreFileOpener-760eb882473db22ba02db606a1b2b7f9-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 508c27680f14476980408574c96d5794_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-09T11:21:06,363 DEBUG [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/508c27680f14476980408574c96d5794_SeqId_4_ 2024-12-09T11:21:06,366 DEBUG [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/529d8d387b5e42aa96de839ba057fb68 2024-12-09T11:21:06,369 DEBUG [StoreFileOpener-760eb882473db22ba02db606a1b2b7f9-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 722d13e656424d9b84f38e6c308249a9_SeqId_4_: NONE, but ROW specified in column family 
configuration 2024-12-09T11:21:06,369 DEBUG [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/722d13e656424d9b84f38e6c308249a9_SeqId_4_ 2024-12-09T11:21:06,369 WARN [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/21d4d7a3d6024b7fb35fe6be60ed0036_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@305fd0da 2024-12-09T11:21:06,370 WARN [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/508c27680f14476980408574c96d5794_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@305fd0da 2024-12-09T11:21:06,370 WARN [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/529d8d387b5e42aa96de839ba057fb68 from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@305fd0da 2024-12-09T11:21:06,370 WARN [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/722d13e656424d9b84f38e6c308249a9_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@305fd0da 2024-12-09T11:21:06,370 DEBUG [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.StoreEngine(327): Moving the files [hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/21d4d7a3d6024b7fb35fe6be60ed0036_SeqId_4_, hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/508c27680f14476980408574c96d5794_SeqId_4_, hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/529d8d387b5e42aa96de839ba057fb68, hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/722d13e656424d9b84f38e6c308249a9_SeqId_4_] to archive 2024-12-09T11:21:06,370 DEBUG [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
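The HFileArchiver entries above show compacted and bulk-loaded store files being moved out of the region's data directory into the mirrored layout under /hbase/archive before the store is opened. As an illustration only (not code from this test or from the HBase archiver itself), a minimal Java sketch of that path mapping; the helper name toArchivePath is a hypothetical label for this sketch:

    import org.apache.hadoop.fs.Path;

    public class ArchivePathSketch {
      // e.g. hdfs://localhost:40493/hbase/data/default/<table>/<region>/<cf>/<file>
      //   -> hdfs://localhost:40493/hbase/archive/data/default/<table>/<region>/<cf>/<file>
      static Path toArchivePath(Path rootDir, Path storeFile) {
        // Store file path relative to the HBase root dir ("data/default/...").
        String relative = storeFile.toUri().getPath()
            .substring(rootDir.toUri().getPath().length() + 1);
        return new Path(new Path(rootDir, "archive"), relative);
      }

      public static void main(String[] args) {
        Path root = new Path("hdfs://localhost:40493/hbase");
        Path storeFile = new Path(root,
            "data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/"
            + "21d4d7a3d6024b7fb35fe6be60ed0036_SeqId_4_");
        // Prints the archive location matching the "Archived from FileableStoreFile" lines below.
        System.out.println(toArchivePath(root, storeFile));
      }
    }

The source and destination paths in the archiver DEBUG lines that follow differ only by the inserted "archive/" component, which is what the sketch reproduces.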
2024-12-09T11:21:06,372 DEBUG [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/21d4d7a3d6024b7fb35fe6be60ed0036_SeqId_4_ to hdfs://localhost:40493/hbase/archive/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/21d4d7a3d6024b7fb35fe6be60ed0036_SeqId_4_ 2024-12-09T11:21:06,373 DEBUG [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/508c27680f14476980408574c96d5794_SeqId_4_ to hdfs://localhost:40493/hbase/archive/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/508c27680f14476980408574c96d5794_SeqId_4_ 2024-12-09T11:21:06,374 DEBUG [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/529d8d387b5e42aa96de839ba057fb68 to hdfs://localhost:40493/hbase/archive/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/529d8d387b5e42aa96de839ba057fb68 2024-12-09T11:21:06,375 DEBUG [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/722d13e656424d9b84f38e6c308249a9_SeqId_4_ to hdfs://localhost:40493/hbase/archive/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/a/722d13e656424d9b84f38e6c308249a9_SeqId_4_ 2024-12-09T11:21:06,375 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.HStore(327): Store=760eb882473db22ba02db606a1b2b7f9/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:06,375 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:06,376 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 760eb882473db22ba02db606a1b2b7f9 columnFamilyName b 2024-12-09T11:21:06,376 DEBUG [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:06,376 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] 
regionserver.HStore(327): Store=760eb882473db22ba02db606a1b2b7f9/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:06,377 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:06,377 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 760eb882473db22ba02db606a1b2b7f9 columnFamilyName c 2024-12-09T11:21:06,377 DEBUG [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:06,378 INFO [StoreOpener-760eb882473db22ba02db606a1b2b7f9-1 {}] regionserver.HStore(327): Store=760eb882473db22ba02db606a1b2b7f9/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:06,378 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:06,378 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:06,380 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:06,380 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/recovered.edits/0000000000000000008 2024-12-09T11:21:06,382 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/recovered.edits/0000000000000000008: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:21:06,384 DEBUG [Time-limited test {}] regionserver.HRegion(5836): 760eb882473db22ba02db606a1b2b7f9 : Replaying compaction marker table_name: "testCompactedBulkLoadedFiles" encoded_region_name: "760eb882473db22ba02db606a1b2b7f9" family_name: "a" compaction_input: "529d8d387b5e42aa96de839ba057fb68" compaction_input: "508c27680f14476980408574c96d5794_SeqId_4_" compaction_input: "722d13e656424d9b84f38e6c308249a9_SeqId_4_" compaction_input: 
"21d4d7a3d6024b7fb35fe6be60ed0036_SeqId_4_" compaction_output: "21a6548d35b44d47bc478af81bc05096" store_home_dir: "a" region_name: "testCompactedBulkLoadedFiles,,1733743261027.760eb882473db22ba02db606a1b2b7f9." with seqId=9223372036854775807 and lastReplayedOpenRegionSeqId=-1 2024-12-09T11:21:06,384 DEBUG [Time-limited test {}] regionserver.HStore(1354): Completing compaction from the WAL marker 2024-12-09T11:21:06,384 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 0, skipped 2, firstSequenceIdInLog=3, maxSequenceIdInLog=8, path=hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/recovered.edits/0000000000000000008 2024-12-09T11:21:06,385 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/recovered.edits/0000000000000000008 2024-12-09T11:21:06,386 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:06,386 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:06,387 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-09T11:21:06,396 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 760eb882473db22ba02db606a1b2b7f9 2024-12-09T11:21:06,399 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/testCompactedBulkLoadedFiles/760eb882473db22ba02db606a1b2b7f9/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T11:21:06,400 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 760eb882473db22ba02db606a1b2b7f9; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64557776, jitterRate=-0.03801417350769043}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-09T11:21:06,400 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 760eb882473db22ba02db606a1b2b7f9: Writing region info on filesystem at 1733743266349Initializing all the Stores at 1733743266349Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743266349Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743266349Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B 
(64KB)'} at 1733743266349Cleaning up temporary data from old regions at 1733743266386 (+37 ms)Region opened successfully at 1733743266400 (+14 ms) 2024-12-09T11:21:06,403 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 760eb882473db22ba02db606a1b2b7f9, disabling compactions & flushes 2024-12-09T11:21:06,403 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1733743261027.760eb882473db22ba02db606a1b2b7f9. 2024-12-09T11:21:06,403 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1733743261027.760eb882473db22ba02db606a1b2b7f9. 2024-12-09T11:21:06,403 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1733743261027.760eb882473db22ba02db606a1b2b7f9. after waiting 0 ms 2024-12-09T11:21:06,403 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1733743261027.760eb882473db22ba02db606a1b2b7f9. 2024-12-09T11:21:06,407 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1733743261027.760eb882473db22ba02db606a1b2b7f9. 2024-12-09T11:21:06,407 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 760eb882473db22ba02db606a1b2b7f9: Waiting for close lock at 1733743266403Disabling compacts and flushes for region at 1733743266403Disabling writes for close at 1733743266403Writing region close event to WAL at 1733743266407 (+4 ms)Closed at 1733743266407 2024-12-09T11:21:06,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741929_1108 (size=95) 2024-12-09T11:21:06,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741929_1108 (size=95) 2024-12-09T11:21:06,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741929_1108 (size=95) 2024-12-09T11:21:06,415 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-09T11:21:06,415 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733743266323) 2024-12-09T11:21:06,438 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testCompactedBulkLoadedFiles Thread=437 (was 429) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1804981204_22 at /127.0.0.1:37854 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestCompactedBulkLoadedFiles@localhost:40493 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45857 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:40493 from jenkinstestCompactedBulkLoadedFiles java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: AsyncFSWAL-26-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:45857 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1804981204_22 at /127.0.0.1:60588 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1339 (was 1262) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=425 (was 419) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=296 (was 339) 2024-12-09T11:21:06,438 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1339 is superior to 1024 2024-12-09T11:21:06,454 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenViaHRegion Thread=437, OpenFileDescriptor=1339, MaxFileDescriptor=1048576, SystemLoadAverage=425, ProcessCount=11, AvailableMemoryMB=296 2024-12-09T11:21:06,454 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1339 is superior to 1024 2024-12-09T11:21:06,469 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:21:06,471 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:21:06,471 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T11:21:06,474 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-93592375, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/hregion-93592375, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:21:06,487 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-93592375/hregion-93592375.1733743266474, exclude list is [], retry=0 2024-12-09T11:21:06,490 DEBUG [AsyncFSWAL-28-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:21:06,490 DEBUG [AsyncFSWAL-28-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:21:06,491 DEBUG [AsyncFSWAL-28-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:21:06,492 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-93592375/hregion-93592375.1733743266474 2024-12-09T11:21:06,493 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39935:39935)] 2024-12-09T11:21:06,493 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 7936c6f8ca28c09f50ba0ee7d098d193, NAME => 'testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40493/hbase 2024-12-09T11:21:06,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741931_1110 (size=67) 2024-12-09T11:21:06,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741931_1110 (size=67) 2024-12-09T11:21:06,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741931_1110 (size=67) 2024-12-09T11:21:06,504 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:21:06,505 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,506 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7936c6f8ca28c09f50ba0ee7d098d193 columnFamilyName a 2024-12-09T11:21:06,506 DEBUG [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:06,507 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(327): Store=7936c6f8ca28c09f50ba0ee7d098d193/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:06,507 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,508 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7936c6f8ca28c09f50ba0ee7d098d193 columnFamilyName b 2024-12-09T11:21:06,508 DEBUG [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:06,508 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(327): Store=7936c6f8ca28c09f50ba0ee7d098d193/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:06,508 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,509 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7936c6f8ca28c09f50ba0ee7d098d193 columnFamilyName c 2024-12-09T11:21:06,510 DEBUG [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:06,510 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(327): Store=7936c6f8ca28c09f50ba0ee7d098d193/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:06,510 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,511 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,511 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,512 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 
7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,512 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,512 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-09T11:21:06,513 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,515 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:21:06,516 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 7936c6f8ca28c09f50ba0ee7d098d193; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74661346, jitterRate=0.11254075169563293}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-09T11:21:06,516 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 7936c6f8ca28c09f50ba0ee7d098d193: Writing region info on filesystem at 1733743266504Initializing all the Stores at 1733743266504Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743266504Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743266504Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743266505 (+1 ms)Cleaning up temporary data from old regions at 1733743266512 (+7 ms)Region opened successfully at 1733743266516 (+4 ms) 2024-12-09T11:21:06,516 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 7936c6f8ca28c09f50ba0ee7d098d193, disabling compactions & flushes 2024-12-09T11:21:06,516 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193. 2024-12-09T11:21:06,516 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193. 2024-12-09T11:21:06,516 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193. 
after waiting 0 ms 2024-12-09T11:21:06,516 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193. 2024-12-09T11:21:06,517 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193. 2024-12-09T11:21:06,517 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 7936c6f8ca28c09f50ba0ee7d098d193: Waiting for close lock at 1733743266516Disabling compacts and flushes for region at 1733743266516Disabling writes for close at 1733743266516Writing region close event to WAL at 1733743266517 (+1 ms)Closed at 1733743266517 2024-12-09T11:21:06,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741930_1109 (size=95) 2024-12-09T11:21:06,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741930_1109 (size=95) 2024-12-09T11:21:06,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741930_1109 (size=95) 2024-12-09T11:21:06,521 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-09T11:21:06,521 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-93592375:(num 1733743266474) 2024-12-09T11:21:06,521 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-09T11:21:06,523 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:21:06,536 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266523, exclude list is [], retry=0 2024-12-09T11:21:06,539 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:21:06,539 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:21:06,540 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:21:06,541 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266523 2024-12-09T11:21:06,541 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39935:39935),(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:43581:43581)] 2024-12-09T11:21:06,541 DEBUG [Time-limited test {}] 
regionserver.HRegion(7752): Opening region: {ENCODED => 7936c6f8ca28c09f50ba0ee7d098d193, NAME => 'testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:21:06,542 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:21:06,542 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,542 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,543 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,544 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7936c6f8ca28c09f50ba0ee7d098d193 columnFamilyName a 2024-12-09T11:21:06,544 DEBUG [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:06,544 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(327): Store=7936c6f8ca28c09f50ba0ee7d098d193/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:06,544 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,545 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7936c6f8ca28c09f50ba0ee7d098d193 columnFamilyName b 2024-12-09T11:21:06,545 DEBUG [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:06,545 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(327): Store=7936c6f8ca28c09f50ba0ee7d098d193/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:06,545 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,546 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7936c6f8ca28c09f50ba0ee7d098d193 columnFamilyName c 2024-12-09T11:21:06,546 DEBUG [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:06,546 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(327): Store=7936c6f8ca28c09f50ba0ee7d098d193/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:06,547 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,547 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,548 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,549 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,549 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,549 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
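The FlushLargeStoresPolicy entry above reports that no per-column-family lower bound is configured and falls back to the region memstore flush size divided by the number of families. A short Java sketch of just that arithmetic (not the HBase implementation): with the default 128 MB flush size and the three families a, b and c, integer division gives 44739242 bytes (~42.7 MB), matching flushSizeLowerBound=44739242 in the region open journal.

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024; // assumed hbase.hregion.memstore.flush.size default, 128 MB
        int numFamilies = 3;                         // families a, b, c in this test table
        long flushSizeLowerBound = memstoreFlushSize / numFamilies;
        System.out.println(flushSizeLowerBound);     // 44739242, i.e. ~42.7 MB
      }
    }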
2024-12-09T11:21:06,550 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,551 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 7936c6f8ca28c09f50ba0ee7d098d193; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67110771, jitterRate=2.841651439666748E-5}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-09T11:21:06,551 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 7936c6f8ca28c09f50ba0ee7d098d193: Writing region info on filesystem at 1733743266542Initializing all the Stores at 1733743266542Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743266543 (+1 ms)Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743266543Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743266543Cleaning up temporary data from old regions at 1733743266549 (+6 ms)Region opened successfully at 1733743266551 (+2 ms) 2024-12-09T11:21:06,559 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 7936c6f8ca28c09f50ba0ee7d098d193 3/3 column families, dataSize=870 B heapSize=2.31 KB 2024-12-09T11:21:06,574 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/.tmp/a/8f9563f5a7ba4fd6b11616cbe97813b5 is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1733743266552/Put/seqid=0 2024-12-09T11:21:06,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741933_1112 (size=5958) 2024-12-09T11:21:06,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741933_1112 (size=5958) 2024-12-09T11:21:06,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741933_1112 (size=5958) 2024-12-09T11:21:06,581 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/.tmp/a/8f9563f5a7ba4fd6b11616cbe97813b5 2024-12-09T11:21:06,586 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/.tmp/a/8f9563f5a7ba4fd6b11616cbe97813b5 as hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/a/8f9563f5a7ba4fd6b11616cbe97813b5 2024-12-09T11:21:06,591 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/a/8f9563f5a7ba4fd6b11616cbe97813b5, entries=10, sequenceid=13, filesize=5.8 K 2024-12-09T11:21:06,592 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 7936c6f8ca28c09f50ba0ee7d098d193 in 33ms, sequenceid=13, compaction requested=false 2024-12-09T11:21:06,592 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 7936c6f8ca28c09f50ba0ee7d098d193: 2024-12-09T11:21:06,612 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 7936c6f8ca28c09f50ba0ee7d098d193, disabling compactions & flushes 2024-12-09T11:21:06,612 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193. 2024-12-09T11:21:06,612 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193. 2024-12-09T11:21:06,612 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193. after waiting 0 ms 2024-12-09T11:21:06,612 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193. 2024-12-09T11:21:06,613 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1740 in region testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193. 2024-12-09T11:21:06,613 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193. 
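The flush entries above show ten a:x0..x9 cells (870 bytes) written to a temp HFile under .tmp/a and committed into the store at sequenceid=13, while later edits stay in the memstore when the region is closed (the 1740-byte "Memstore data size" message) and are only recovered through the WAL split that follows. The test drives HRegion directly; purely as a rough client-side analogue, a sketch using the public HBase client API, with the table name, family and row key taken from the log and everything else assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReplayEditsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testReplayEditsWrittenViaHRegion");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          byte[] family = Bytes.toBytes("a");
          byte[] row = Bytes.toBytes("testReplayEditsWrittenViaHRegion");
          // Ten qualifiers x0..x9, mirroring the "a:x0" key shown in the flush above.
          for (int i = 0; i < 10; i++) {
            Put put = new Put(row);
            put.addColumn(family, Bytes.toBytes("x" + i), Bytes.toBytes("value-" + i));
            table.put(put);
          }
          // Flush the table so the memstore is written out as an HFile, as in the log.
          admin.flush(tn);
        }
      }
    }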
2024-12-09T11:21:06,613 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 7936c6f8ca28c09f50ba0ee7d098d193: Waiting for close lock at 1733743266611Disabling compacts and flushes for region at 1733743266611Disabling writes for close at 1733743266612 (+1 ms)Writing region close event to WAL at 1733743266612Closed at 1733743266613 (+1 ms) 2024-12-09T11:21:06,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741932_1111 (size=3346) 2024-12-09T11:21:06,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741932_1111 (size=3346) 2024-12-09T11:21:06,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741932_1111 (size=3346) 2024-12-09T11:21:06,640 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266523, size=3.3 K (3346bytes) 2024-12-09T11:21:06,640 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266523 2024-12-09T11:21:06,640 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266523 after 0ms 2024-12-09T11:21:06,642 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266523: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:21:06,643 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266523 took 4ms 2024-12-09T11:21:06,645 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266523 so closing down 2024-12-09T11:21:06,645 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-09T11:21:06,646 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733743266523.temp 2024-12-09T11:21:06,683 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000003-wal.1733743266523.temp 2024-12-09T11:21:06,684 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-09T11:21:06,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741934_1113 (size=2944) 2024-12-09T11:21:06,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741934_1113 (size=2944) 2024-12-09T11:21:06,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741934_1113 
(size=2944) 2024-12-09T11:21:06,700 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000003-wal.1733743266523.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-12-09T11:21:06,702 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000003-wal.1733743266523.temp to hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000035 2024-12-09T11:21:06,702 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 59 ms; skipped=2; WAL=hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266523, size=3.3 K, length=3346, corrupted=false, cancelled=false 2024-12-09T11:21:06,702 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266523, journal: Splitting hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266523, size=3.3 K (3346bytes) at 1733743266640Finishing writing output for hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266523 so closing down at 1733743266645 (+5 ms)Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000003-wal.1733743266523.temp at 1733743266683 (+38 ms)3 split writer threads finished at 1733743266684 (+1 ms)Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000003-wal.1733743266523.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1733743266700 (+16 ms)Rename recovered edits hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000003-wal.1733743266523.temp to hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000035 at 1733743266702 (+2 ms)Processed 32 edits across 1 Regions in 59 ms; skipped=2; WAL=hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266523, size=3.3 K, length=3346, corrupted=false, cancelled=false at 1733743266702 2024-12-09T11:21:06,704 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266523 to hdfs://localhost:40493/hbase/oldWALs/wal.1733743266523 2024-12-09T11:21:06,705 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000035 2024-12-09T11:21:06,705 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-09T11:21:06,706 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, 
rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:21:06,721 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266707, exclude list is [], retry=0 2024-12-09T11:21:06,723 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:21:06,723 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:21:06,723 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:21:06,725 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266707 2024-12-09T11:21:06,725 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39935:39935),(127.0.0.1/127.0.0.1:43581:43581),(127.0.0.1/127.0.0.1:39985:39985)] 2024-12-09T11:21:06,725 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 7936c6f8ca28c09f50ba0ee7d098d193, NAME => 'testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:21:06,725 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:21:06,725 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,725 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,727 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,727 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for 
minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7936c6f8ca28c09f50ba0ee7d098d193 columnFamilyName a 2024-12-09T11:21:06,728 DEBUG [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:06,733 DEBUG [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/a/8f9563f5a7ba4fd6b11616cbe97813b5 2024-12-09T11:21:06,733 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(327): Store=7936c6f8ca28c09f50ba0ee7d098d193/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:06,733 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,734 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7936c6f8ca28c09f50ba0ee7d098d193 columnFamilyName b 2024-12-09T11:21:06,734 DEBUG [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:06,735 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(327): Store=7936c6f8ca28c09f50ba0ee7d098d193/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:06,735 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,735 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7936c6f8ca28c09f50ba0ee7d098d193 columnFamilyName c 2024-12-09T11:21:06,735 DEBUG [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:06,736 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(327): Store=7936c6f8ca28c09f50ba0ee7d098d193/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:06,736 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,737 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,738 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,739 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000035 2024-12-09T11:21:06,741 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000035: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:21:06,742 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 10, firstSequenceIdInLog=3, maxSequenceIdInLog=35, path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000035 2024-12-09T11:21:06,742 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 7936c6f8ca28c09f50ba0ee7d098d193 3/3 column families, dataSize=1.70 KB heapSize=3.88 KB 2024-12-09T11:21:06,757 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/.tmp/b/ec64ce337137496fb57074bc53cde56d is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733743266592/Put/seqid=0 2024-12-09T11:21:06,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741936_1115 (size=5958) 2024-12-09T11:21:06,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741936_1115 (size=5958) 2024-12-09T11:21:06,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741936_1115 (size=5958) 2024-12-09T11:21:06,764 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), 
to=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/.tmp/b/ec64ce337137496fb57074bc53cde56d 2024-12-09T11:21:06,782 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/.tmp/c/d9bd6f28871f4703ad343be17458f07d is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1733743266600/Put/seqid=0 2024-12-09T11:21:06,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741937_1116 (size=5958) 2024-12-09T11:21:06,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741937_1116 (size=5958) 2024-12-09T11:21:06,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741937_1116 (size=5958) 2024-12-09T11:21:06,789 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/.tmp/c/d9bd6f28871f4703ad343be17458f07d 2024-12-09T11:21:06,794 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/.tmp/b/ec64ce337137496fb57074bc53cde56d as hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/b/ec64ce337137496fb57074bc53cde56d 2024-12-09T11:21:06,799 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/b/ec64ce337137496fb57074bc53cde56d, entries=10, sequenceid=35, filesize=5.8 K 2024-12-09T11:21:06,800 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/.tmp/c/d9bd6f28871f4703ad343be17458f07d as hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/c/d9bd6f28871f4703ad343be17458f07d 2024-12-09T11:21:06,805 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/c/d9bd6f28871f4703ad343be17458f07d, entries=10, sequenceid=35, filesize=5.8 K 2024-12-09T11:21:06,805 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for 7936c6f8ca28c09f50ba0ee7d098d193 in 63ms, sequenceid=35, compaction requested=false; wal=null 2024-12-09T11:21:06,806 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000035 2024-12-09T11:21:06,807 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,807 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,808 DEBUG [Time-limited test {}] 
regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-09T11:21:06,809 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:06,812 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/35.seqid, newMaxSeqId=35, maxSeqId=1 2024-12-09T11:21:06,813 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 7936c6f8ca28c09f50ba0ee7d098d193; next sequenceid=36; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61292167, jitterRate=-0.08667553961277008}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-09T11:21:06,813 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 7936c6f8ca28c09f50ba0ee7d098d193: Writing region info on filesystem at 1733743266725Initializing all the Stores at 1733743266726 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743266726Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743266727 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743266727Obtaining lock to block concurrent updates at 1733743266742 (+15 ms)Preparing flush snapshotting stores in 7936c6f8ca28c09f50ba0ee7d098d193 at 1733743266742Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193., syncing WAL and waiting on mvcc, flushsize=dataSize=1740, getHeapSize=3920, getOffHeapSize=0, getCellsCount=20 at 1733743266742Flushing stores of testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193. 
at 1733743266742Flushing 7936c6f8ca28c09f50ba0ee7d098d193/b: creating writer at 1733743266742Flushing 7936c6f8ca28c09f50ba0ee7d098d193/b: appending metadata at 1733743266757 (+15 ms)Flushing 7936c6f8ca28c09f50ba0ee7d098d193/b: closing flushed file at 1733743266757Flushing 7936c6f8ca28c09f50ba0ee7d098d193/c: creating writer at 1733743266768 (+11 ms)Flushing 7936c6f8ca28c09f50ba0ee7d098d193/c: appending metadata at 1733743266782 (+14 ms)Flushing 7936c6f8ca28c09f50ba0ee7d098d193/c: closing flushed file at 1733743266782Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@521cc46a: reopening flushed file at 1733743266793 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58deb151: reopening flushed file at 1733743266799 (+6 ms)Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for 7936c6f8ca28c09f50ba0ee7d098d193 in 63ms, sequenceid=35, compaction requested=false; wal=null at 1733743266805 (+6 ms)Cleaning up temporary data from old regions at 1733743266807 (+2 ms)Region opened successfully at 1733743266813 (+6 ms) 2024-12-09T11:21:06,878 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266707, size=0 (0bytes) 2024-12-09T11:21:06,878 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266707 might be still open, length is 0 2024-12-09T11:21:06,878 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266707 2024-12-09T11:21:06,879 WARN [IPC Server handler 0 on default port 40493 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266707 has not been closed. Lease recovery is in progress. 
RecoveryId = 1117 for block blk_1073741935_1114 2024-12-09T11:21:06,879 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266707 after 1ms 2024-12-09T11:21:08,147 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError 2024-12-09T11:21:08,147 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError Metrics about Tables on a single HBase RegionServer 2024-12-09T11:21:08,147 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles 2024-12-09T11:21:08,147 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles Metrics about Tables on a single HBase RegionServer 2024-12-09T11:21:08,415 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:37912 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741935_1114] {}] datanode.DataXceiver(331): 127.0.0.1:34459:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37912 dst: /127.0.0.1:34459 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:34459 remote=/127.0.0.1:37912]. Total timeout mills is 60000, 58427 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:21:08,415 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:60192 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741935_1114] {}] datanode.DataXceiver(331): 127.0.0.1:46359:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60192 dst: /127.0.0.1:46359 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:21:08,415 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1633493794_22 at /127.0.0.1:60642 [Receiving block BP-32692473-172.17.0.3-1733743223895:blk_1073741935_1114] {}] datanode.DataXceiver(331): 127.0.0.1:44093:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60642 dst: /127.0.0.1:44093 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
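The lease-recovery exchange around this WAL (attempt=0 above fails while the NameNode reports lease recovery in progress; the entries that follow show attempt=1 succeeding about four seconds later) follows the usual recover-and-retry pattern on HDFS. The sketch below illustrates that pattern with DistributedFileSystem.recoverLease(); it is an assumption-level illustration, not HBase's RecoverLeaseFSUtils, and the 4-second pause is chosen only to match the ~4001 ms gap in this log.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
    // Polls recoverLease() until the NameNode reports the WAL file closed.
    static void recoverLease(Configuration conf, Path walFile)
            throws IOException, InterruptedException {
        FileSystem fs = walFile.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
            return; // nothing to recover on a non-HDFS filesystem
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        int attempt = 0;
        while (!dfs.recoverLease(walFile)) { // false => recovery still in progress
            attempt++;
            Thread.sleep(4000L);             // assumed pause, matching the gap seen in this log
        }
        System.out.println("Recovered lease, attempt=" + attempt + " on file=" + walFile);
    }
}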
2024-12-09T11:21:08,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741935_1117 (size=2936) 2024-12-09T11:21:08,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741935_1117 (size=2936) 2024-12-09T11:21:10,879 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266707 after 4001ms 2024-12-09T11:21:10,882 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266707: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:21:10,882 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266707 took 4004ms 2024-12-09T11:21:10,884 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266707; continuing. 2024-12-09T11:21:10,885 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266707 so closing down 2024-12-09T11:21:10,885 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-09T11:21:10,886 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000037-wal.1733743266707.temp 2024-12-09T11:21:10,887 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000037-wal.1733743266707.temp 2024-12-09T11:21:10,887 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-09T11:21:10,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741938_1118 (size=2944) 2024-12-09T11:21:10,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741938_1118 (size=2944) 2024-12-09T11:21:10,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741938_1118 (size=2944) 2024-12-09T11:21:10,896 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000037-wal.1733743266707.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-12-09T11:21:10,897 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000037-wal.1733743266707.temp to 
hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000066 2024-12-09T11:21:10,897 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 30 edits across 1 Regions in 14 ms; skipped=0; WAL=hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266707, size=0, length=0, corrupted=false, cancelled=false 2024-12-09T11:21:10,897 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266707, journal: Splitting hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266707, size=0 (0bytes) at 1733743266878Finishing writing output for hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266707 so closing down at 1733743270885 (+4007 ms)Creating recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000037-wal.1733743266707.temp at 1733743270887 (+2 ms)3 split writer threads finished at 1733743270887Closed recovered edits writer path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000037-wal.1733743266707.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1733743270896 (+9 ms)Rename recovered edits hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000037-wal.1733743266707.temp to hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000066 at 1733743270897 (+1 ms)Processed 30 edits across 1 Regions in 14 ms; skipped=0; WAL=hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266707, size=0, length=0, corrupted=false, cancelled=false at 1733743270897 2024-12-09T11:21:10,899 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266707 to hdfs://localhost:40493/hbase/oldWALs/wal.1733743266707 2024-12-09T11:21:10,900 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000066 2024-12-09T11:21:10,900 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-09T11:21:10,902 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:40493/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468, archiveDir=hdfs://localhost:40493/hbase/oldWALs, maxLogs=32 2024-12-09T11:21:10,915 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743270902, exclude list is [], retry=0 2024-12-09T11:21:10,918 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:46359,DS-8f8100b1-22b1-43bf-9468-405dfca7481e,DISK] 2024-12-09T11:21:10,918 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34459,DS-f790a494-97c1-4864-a7b1-6442795d840b,DISK] 2024-12-09T11:21:10,918 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44093,DS-049b5d3a-b506-46ca-8e7a-3fb74c0e7d3e,DISK] 2024-12-09T11:21:10,920 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743270902 2024-12-09T11:21:10,920 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39985:39985),(127.0.0.1/127.0.0.1:39935:39935),(127.0.0.1/127.0.0.1:43581:43581)] 2024-12-09T11:21:10,920 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:21:10,922 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:10,923 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7936c6f8ca28c09f50ba0ee7d098d193 columnFamilyName a 2024-12-09T11:21:10,923 DEBUG [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:10,933 DEBUG [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/a/8f9563f5a7ba4fd6b11616cbe97813b5 2024-12-09T11:21:10,933 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(327): Store=7936c6f8ca28c09f50ba0ee7d098d193/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:10,933 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:10,934 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7936c6f8ca28c09f50ba0ee7d098d193 columnFamilyName b 2024-12-09T11:21:10,934 DEBUG [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:10,939 DEBUG [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/b/ec64ce337137496fb57074bc53cde56d 2024-12-09T11:21:10,939 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(327): Store=7936c6f8ca28c09f50ba0ee7d098d193/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:10,939 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:10,940 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7936c6f8ca28c09f50ba0ee7d098d193 columnFamilyName c 2024-12-09T11:21:10,940 DEBUG [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:10,945 DEBUG [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/c/d9bd6f28871f4703ad343be17458f07d 2024-12-09T11:21:10,945 INFO [StoreOpener-7936c6f8ca28c09f50ba0ee7d098d193-1 {}] regionserver.HStore(327): Store=7936c6f8ca28c09f50ba0ee7d098d193/c, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:10,946 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:10,947 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:10,948 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:10,949 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000066 2024-12-09T11:21:10,951 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000066: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-09T11:21:10,957 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 30, skipped 0, firstSequenceIdInLog=37, maxSequenceIdInLog=66, path=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000066 2024-12-09T11:21:10,957 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 7936c6f8ca28c09f50ba0ee7d098d193 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-12-09T11:21:10,973 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/.tmp/a/ea6ca4fabf2340f690989e2969d0de97 is 91, key is testReplayEditsWrittenViaHRegion/a:y0/1733743266821/Put/seqid=0 2024-12-09T11:21:10,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741940_1120 (size=5958) 2024-12-09T11:21:10,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741940_1120 (size=5958) 2024-12-09T11:21:10,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741940_1120 (size=5958) 2024-12-09T11:21:10,981 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/.tmp/a/ea6ca4fabf2340f690989e2969d0de97 2024-12-09T11:21:11,000 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/.tmp/b/2c1b733120da4ce7abf5a93b9b08288b is 91, key is testReplayEditsWrittenViaHRegion/b:y0/1733743266828/Put/seqid=0 2024-12-09T11:21:11,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741941_1121 (size=5958) 2024-12-09T11:21:11,007 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741941_1121 (size=5958) 2024-12-09T11:21:11,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741941_1121 (size=5958) 2024-12-09T11:21:11,008 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/.tmp/b/2c1b733120da4ce7abf5a93b9b08288b 2024-12-09T11:21:11,027 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/.tmp/c/eaa188ef19c9445f9b22eecd8fad4ba9 is 91, key is testReplayEditsWrittenViaHRegion/c:y0/1733743266835/Put/seqid=0 2024-12-09T11:21:11,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741942_1122 (size=5958) 2024-12-09T11:21:11,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741942_1122 (size=5958) 2024-12-09T11:21:11,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741942_1122 (size=5958) 2024-12-09T11:21:11,036 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/.tmp/c/eaa188ef19c9445f9b22eecd8fad4ba9 2024-12-09T11:21:11,042 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/.tmp/a/ea6ca4fabf2340f690989e2969d0de97 as hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/a/ea6ca4fabf2340f690989e2969d0de97 2024-12-09T11:21:11,048 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/a/ea6ca4fabf2340f690989e2969d0de97, entries=10, sequenceid=66, filesize=5.8 K 2024-12-09T11:21:11,049 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/.tmp/b/2c1b733120da4ce7abf5a93b9b08288b as hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/b/2c1b733120da4ce7abf5a93b9b08288b 2024-12-09T11:21:11,054 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/b/2c1b733120da4ce7abf5a93b9b08288b, entries=10, sequenceid=66, filesize=5.8 K 2024-12-09T11:21:11,055 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/.tmp/c/eaa188ef19c9445f9b22eecd8fad4ba9 as hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/c/eaa188ef19c9445f9b22eecd8fad4ba9 
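The recovered-edits file names in this split line up with the sequence ids the splitter reports: the temp file carries the first sequence id written (0000000000000000037-wal.1733743266707.temp) and the renamed file carries the maximum (0000000000000000066, with firstSequenceIdInLog=37 and maxSequenceIdInLog=66). A minimal naming sketch follows, assuming only the 19-digit zero-padding visible in the paths above (an illustration, not HBase's WALSplitUtil):

// Sketch of the zero-padded recovered.edits naming seen in this log.
public class RecoveredEditsNameSketch {
    static String recoveredEditsName(long seqId) {
        return String.format("%019d", seqId); // 19-digit, zero-padded sequence id
    }
    public static void main(String[] args) {
        // The temp writer is named after the first sequence id it receives...
        System.out.println(recoveredEditsName(37) + "-wal.1733743266707.temp");
        // ...and the final file after the maximum sequence id it contains.
        System.out.println(recoveredEditsName(66)); // 0000000000000000066
    }
}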
2024-12-09T11:21:11,060 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/c/eaa188ef19c9445f9b22eecd8fad4ba9, entries=10, sequenceid=66, filesize=5.8 K 2024-12-09T11:21:11,061 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 7936c6f8ca28c09f50ba0ee7d098d193 in 103ms, sequenceid=66, compaction requested=false; wal=null 2024-12-09T11:21:11,061 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/0000000000000000066 2024-12-09T11:21:11,062 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:11,063 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:11,063 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-09T11:21:11,065 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 7936c6f8ca28c09f50ba0ee7d098d193 2024-12-09T11:21:11,067 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/hbase/data/default/testReplayEditsWrittenViaHRegion/7936c6f8ca28c09f50ba0ee7d098d193/recovered.edits/66.seqid, newMaxSeqId=66, maxSeqId=35 2024-12-09T11:21:11,068 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 7936c6f8ca28c09f50ba0ee7d098d193; next sequenceid=67; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66794261, jitterRate=-0.004687950015068054}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-09T11:21:11,068 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 7936c6f8ca28c09f50ba0ee7d098d193: Writing region info on filesystem at 1733743270920Initializing all the Stores at 1733743270921 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743270921Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743270921Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743270921Obtaining lock to block concurrent updates at 1733743270957 (+36 ms)Preparing flush snapshotting 
stores in 7936c6f8ca28c09f50ba0ee7d098d193 at 1733743270957Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193., syncing WAL and waiting on mvcc, flushsize=dataSize=2610, getHeapSize=5520, getOffHeapSize=0, getCellsCount=30 at 1733743270958 (+1 ms)Flushing stores of testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193. at 1733743270958Flushing 7936c6f8ca28c09f50ba0ee7d098d193/a: creating writer at 1733743270958Flushing 7936c6f8ca28c09f50ba0ee7d098d193/a: appending metadata at 1733743270973 (+15 ms)Flushing 7936c6f8ca28c09f50ba0ee7d098d193/a: closing flushed file at 1733743270973Flushing 7936c6f8ca28c09f50ba0ee7d098d193/b: creating writer at 1733743270986 (+13 ms)Flushing 7936c6f8ca28c09f50ba0ee7d098d193/b: appending metadata at 1733743271000 (+14 ms)Flushing 7936c6f8ca28c09f50ba0ee7d098d193/b: closing flushed file at 1733743271000Flushing 7936c6f8ca28c09f50ba0ee7d098d193/c: creating writer at 1733743271012 (+12 ms)Flushing 7936c6f8ca28c09f50ba0ee7d098d193/c: appending metadata at 1733743271027 (+15 ms)Flushing 7936c6f8ca28c09f50ba0ee7d098d193/c: closing flushed file at 1733743271027Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@740d5175: reopening flushed file at 1733743271041 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f4d6a0a: reopening flushed file at 1733743271048 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f7c8d38: reopening flushed file at 1733743271054 (+6 ms)Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 7936c6f8ca28c09f50ba0ee7d098d193 in 103ms, sequenceid=66, compaction requested=false; wal=null at 1733743271061 (+7 ms)Cleaning up temporary data from old regions at 1733743271063 (+2 ms)Region opened successfully at 1733743271068 (+5 ms) 2024-12-09T11:21:11,084 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 7936c6f8ca28c09f50ba0ee7d098d193, disabling compactions & flushes 2024-12-09T11:21:11,084 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193. 2024-12-09T11:21:11,084 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193. 2024-12-09T11:21:11,084 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193. after waiting 0 ms 2024-12-09T11:21:11,084 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193. 2024-12-09T11:21:11,086 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733743266470.7936c6f8ca28c09f50ba0ee7d098d193. 
2024-12-09T11:21:11,087 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 7936c6f8ca28c09f50ba0ee7d098d193: Waiting for close lock at 1733743271083Disabling compacts and flushes for region at 1733743271083Disabling writes for close at 1733743271084 (+1 ms)Writing region close event to WAL at 1733743271086 (+2 ms)Closed at 1733743271086 2024-12-09T11:21:11,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741939_1119 (size=95) 2024-12-09T11:21:11,089 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743270902 not finished, retry = 0 2024-12-09T11:21:11,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741939_1119 (size=95) 2024-12-09T11:21:11,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741939_1119 (size=95) 2024-12-09T11:21:11,192 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-09T11:21:11,192 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733743270902) 2024-12-09T11:21:11,207 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenViaHRegion Thread=442 (was 437) Potentially hanging thread: AsyncFSWAL-28-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:40493 from jenkinstestReplayEditsWrittenViaHRegion java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_709535820_22 at /127.0.0.1:33026 [Waiting for operation #15] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestReplayEditsWrittenViaHRegion@localhost:40493 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_709535820_22 at /127.0.0.1:55530 [Waiting for operation #13] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_709535820_22 at /127.0.0.1:56264 [Waiting for operation #15] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1404 (was 1339) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=439 (was 425) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=252 (was 296) 2024-12-09T11:21:11,207 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1404 is superior to 1024 2024-12-09T11:21:11,207 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T11:21:11,207 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T11:21:11,207 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:21:11,208 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:21:11,208 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:21:11,208 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T11:21:11,208 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T11:21:11,209 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=818882006, stopped=false 2024-12-09T11:21:11,209 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2dff3a36d44f,42781,1733743227566 2024-12-09T11:21:11,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:21:11,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:21:11,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:21:11,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:21:11,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:21:11,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:21:11,211 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T11:21:11,212 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:21:11,217 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T11:21:11,217 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:21:11,217 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:21:11,217 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2dff3a36d44f,46259,1733743228656' ***** 2024-12-09T11:21:11,217 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T11:21:11,217 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2dff3a36d44f,46367,1733743228871' ***** 2024-12-09T11:21:11,217 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T11:21:11,217 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:21:11,217 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T11:21:11,217 DEBUG 
[zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:21:11,217 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T11:21:11,218 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T11:21:11,218 INFO [RS:0;2dff3a36d44f:46259 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T11:21:11,218 INFO [RS:0;2dff3a36d44f:46259 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T11:21:11,218 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T11:21:11,218 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.HRegionServer(3091): Received CLOSE for 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:21:11,218 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.HRegionServer(959): stopping server 2dff3a36d44f,46259,1733743228656 2024-12-09T11:21:11,218 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4dc5ca2e3dd0f6286d8d8a4977d489a3, disabling compactions & flushes 2024-12-09T11:21:11,218 INFO [RS:0;2dff3a36d44f:46259 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:21:11,218 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:21:11,218 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:21:11,218 INFO [RS:0;2dff3a36d44f:46259 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2dff3a36d44f:46259. 2024-12-09T11:21:11,218 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. after waiting 0 ms 2024-12-09T11:21:11,218 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 
2024-12-09T11:21:11,218 DEBUG [RS:0;2dff3a36d44f:46259 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:21:11,218 DEBUG [RS:0;2dff3a36d44f:46259 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:21:11,219 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T11:21:11,219 DEBUG [RS:0;2dff3a36d44f:46259 {}] regionserver.HRegionServer(1325): Online Regions={4dc5ca2e3dd0f6286d8d8a4977d489a3=testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3.} 2024-12-09T11:21:11,219 DEBUG [RS:0;2dff3a36d44f:46259 {}] regionserver.HRegionServer(1351): Waiting on 4dc5ca2e3dd0f6286d8d8a4977d489a3 2024-12-09T11:21:11,219 INFO [RS:2;2dff3a36d44f:46367 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T11:21:11,219 INFO [RS:2;2dff3a36d44f:46367 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T11:21:11,219 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.HRegionServer(959): stopping server 2dff3a36d44f,46367,1733743228871 2024-12-09T11:21:11,219 INFO [RS:2;2dff3a36d44f:46367 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:21:11,219 INFO [RS:2;2dff3a36d44f:46367 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;2dff3a36d44f:46367. 
2024-12-09T11:21:11,219 DEBUG [RS:2;2dff3a36d44f:46367 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:21:11,219 DEBUG [RS:2;2dff3a36d44f:46367 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:21:11,219 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T11:21:11,219 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T11:21:11,219 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-09T11:21:11,219 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T11:21:11,221 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T11:21:11,221 DEBUG [RS:2;2dff3a36d44f:46367 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-09T11:21:11,221 DEBUG [RS:2;2dff3a36d44f:46367 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T11:21:11,221 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T11:21:11,221 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T11:21:11,221 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T11:21:11,221 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T11:21:11,221 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T11:21:11,221 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.19 KB heapSize=2.79 KB 2024-12-09T11:21:11,225 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/default/testReplayEditsAfterRegionMovedWithMultiCF/4dc5ca2e3dd0f6286d8d8a4977d489a3/recovered.edits/20.seqid, newMaxSeqId=20, maxSeqId=17 2024-12-09T11:21:11,227 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/.tmp/info/fe469e241eef48c48ba6264025d3d447 is 205, key is testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3./info:regioninfo/1733743249444/Put/seqid=0 2024-12-09T11:21:11,227 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 2024-12-09T11:21:11,228 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4dc5ca2e3dd0f6286d8d8a4977d489a3: Waiting for close lock at 1733743271218Running coprocessor pre-close hooks at 1733743271218Disabling compacts and flushes for region at 1733743271218Disabling writes for close at 1733743271218Writing region close event to WAL at 1733743271221 (+3 ms)Running coprocessor post-close hooks at 1733743271227 (+6 ms)Closed at 1733743271227 2024-12-09T11:21:11,228 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733743244883.4dc5ca2e3dd0f6286d8d8a4977d489a3. 
2024-12-09T11:21:11,230 INFO [regionserver/2dff3a36d44f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:21:11,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741943_1123 (size=6778) 2024-12-09T11:21:11,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741943_1123 (size=6778) 2024-12-09T11:21:11,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741943_1123 (size=6778) 2024-12-09T11:21:11,237 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.19 KB at sequenceid=23 (bloomFilter=true), to=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/.tmp/info/fe469e241eef48c48ba6264025d3d447 2024-12-09T11:21:11,238 INFO [regionserver/2dff3a36d44f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:21:11,242 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/.tmp/info/fe469e241eef48c48ba6264025d3d447 as hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/info/fe469e241eef48c48ba6264025d3d447 2024-12-09T11:21:11,247 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/info/fe469e241eef48c48ba6264025d3d447, entries=8, sequenceid=23, filesize=6.6 K 2024-12-09T11:21:11,248 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.19 KB/1218, heapSize ~2.02 KB/2072, currentSize=0 B/0 for 1588230740 in 27ms, sequenceid=23, compaction requested=false 2024-12-09T11:21:11,252 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/data/hbase/meta/1588230740/recovered.edits/26.seqid, newMaxSeqId=26, maxSeqId=18 2024-12-09T11:21:11,252 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:21:11,252 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T11:21:11,253 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733743271221Running coprocessor pre-close hooks at 1733743271221Disabling compacts and flushes for region at 1733743271221Disabling writes for close at 1733743271221Obtaining lock to block concurrent updates at 1733743271221Preparing flush snapshotting stores in 1588230740 at 1733743271221Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1218, getHeapSize=2792, getOffHeapSize=0, 
getCellsCount=8 at 1733743271221Flushing stores of hbase:meta,,1.1588230740 at 1733743271222 (+1 ms)Flushing 1588230740/info: creating writer at 1733743271222Flushing 1588230740/info: appending metadata at 1733743271227 (+5 ms)Flushing 1588230740/info: closing flushed file at 1733743271227Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@124885bb: reopening flushed file at 1733743271242 (+15 ms)Finished flush of dataSize ~1.19 KB/1218, heapSize ~2.02 KB/2072, currentSize=0 B/0 for 1588230740 in 27ms, sequenceid=23, compaction requested=false at 1733743271248 (+6 ms)Writing region close event to WAL at 1733743271249 (+1 ms)Running coprocessor post-close hooks at 1733743271252 (+3 ms)Closed at 1733743271252 2024-12-09T11:21:11,253 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T11:21:11,419 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.HRegionServer(976): stopping server 2dff3a36d44f,46259,1733743228656; all regions closed. 2024-12-09T11:21:11,421 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.HRegionServer(976): stopping server 2dff3a36d44f,46367,1733743228871; all regions closed. 2024-12-09T11:21:11,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741834_1010 (size=2187) 2024-12-09T11:21:11,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741834_1010 (size=2187) 2024-12-09T11:21:11,427 DEBUG [RS:0;2dff3a36d44f:46259 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/oldWALs 2024-12-09T11:21:11,427 INFO [RS:0;2dff3a36d44f:46259 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2dff3a36d44f%2C46259%2C1733743228656:(num 1733743230364) 2024-12-09T11:21:11,427 DEBUG [RS:0;2dff3a36d44f:46259 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:21:11,427 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:21:11,427 INFO [RS:0;2dff3a36d44f:46259 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:21:11,428 INFO [RS:0;2dff3a36d44f:46259 {}] hbase.ChoreService(370): Chore service for: regionserver/2dff3a36d44f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T11:21:11,428 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T11:21:11,428 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T11:21:11,428 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T11:21:11,428 INFO [RS:0;2dff3a36d44f:46259 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:21:11,428 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T11:21:11,428 INFO [RS:0;2dff3a36d44f:46259 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:46259 2024-12-09T11:21:11,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2dff3a36d44f,46259,1733743228656 2024-12-09T11:21:11,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:21:11,430 INFO [RS:0;2dff3a36d44f:46259 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T11:21:11,431 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2dff3a36d44f,46259,1733743228656] 2024-12-09T11:21:11,433 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2dff3a36d44f,46259,1733743228656 already deleted, retry=false 2024-12-09T11:21:11,433 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2dff3a36d44f,46259,1733743228656 expired; onlineServers=1 2024-12-09T11:21:11,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741893_1071 (size=1675) 2024-12-09T11:21:11,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741893_1071 (size=1675) 2024-12-09T11:21:11,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741893_1071 (size=1675) 2024-12-09T11:21:11,442 DEBUG [RS:2;2dff3a36d44f:46367 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/oldWALs 2024-12-09T11:21:11,442 INFO [RS:2;2dff3a36d44f:46367 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2dff3a36d44f%2C46367%2C1733743228871.meta:.meta(num 1733743248516) 2024-12-09T11:21:11,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741835_1011 (size=95) 2024-12-09T11:21:11,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741835_1011 (size=95) 2024-12-09T11:21:11,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741835_1011 (size=95) 2024-12-09T11:21:11,452 DEBUG [RS:2;2dff3a36d44f:46367 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/oldWALs 2024-12-09T11:21:11,452 INFO [RS:2;2dff3a36d44f:46367 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2dff3a36d44f%2C46367%2C1733743228871:(num 1733743230365) 2024-12-09T11:21:11,452 DEBUG [RS:2;2dff3a36d44f:46367 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:21:11,452 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:21:11,453 INFO [RS:2;2dff3a36d44f:46367 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:21:11,453 INFO [RS:2;2dff3a36d44f:46367 {}] hbase.ChoreService(370): Chore service for: regionserver/2dff3a36d44f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, 
unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T11:21:11,453 INFO [RS:2;2dff3a36d44f:46367 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:21:11,453 INFO [RS:2;2dff3a36d44f:46367 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:46367 2024-12-09T11:21:11,453 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T11:21:11,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:21:11,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2dff3a36d44f,46367,1733743228871 2024-12-09T11:21:11,456 INFO [RS:2;2dff3a36d44f:46367 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T11:21:11,456 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2dff3a36d44f,46367,1733743228871] 2024-12-09T11:21:11,458 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2dff3a36d44f,46367,1733743228871 already deleted, retry=false 2024-12-09T11:21:11,458 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2dff3a36d44f,46367,1733743228871 expired; onlineServers=0 2024-12-09T11:21:11,458 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2dff3a36d44f,42781,1733743227566' ***** 2024-12-09T11:21:11,458 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T11:21:11,458 INFO [M:0;2dff3a36d44f:42781 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:21:11,458 INFO [M:0;2dff3a36d44f:42781 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:21:11,458 DEBUG [M:0;2dff3a36d44f:42781 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T11:21:11,458 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T11:21:11,459 DEBUG [M:0;2dff3a36d44f:42781 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T11:21:11,459 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743230006 {}] cleaner.HFileCleaner(306): Exit Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743230006,5,FailOnTimeoutGroup] 2024-12-09T11:21:11,459 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743230007 {}] cleaner.HFileCleaner(306): Exit Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743230007,5,FailOnTimeoutGroup] 2024-12-09T11:21:11,459 INFO [M:0;2dff3a36d44f:42781 {}] hbase.ChoreService(370): Chore service for: master/2dff3a36d44f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T11:21:11,459 INFO [M:0;2dff3a36d44f:42781 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:21:11,459 DEBUG [M:0;2dff3a36d44f:42781 {}] master.HMaster(1795): Stopping service threads 2024-12-09T11:21:11,459 INFO [M:0;2dff3a36d44f:42781 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T11:21:11,459 INFO [M:0;2dff3a36d44f:42781 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T11:21:11,460 INFO [M:0;2dff3a36d44f:42781 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T11:21:11,460 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T11:21:11,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T11:21:11,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:21:11,464 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-12-09T11:21:11,464 DEBUG [RegionServerTracker-0 {}] master.ActiveMasterManager(353): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-12-09T11:21:11,465 INFO [M:0;2dff3a36d44f:42781 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/.lastflushedseqids 2024-12-09T11:21:11,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741944_1124 (size=119) 2024-12-09T11:21:11,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741944_1124 (size=119) 2024-12-09T11:21:11,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741944_1124 (size=119) 2024-12-09T11:21:11,487 INFO [M:0;2dff3a36d44f:42781 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T11:21:11,487 INFO [M:0;2dff3a36d44f:42781 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, 
isAbort=false 2024-12-09T11:21:11,487 DEBUG [M:0;2dff3a36d44f:42781 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T11:21:11,487 INFO [M:0;2dff3a36d44f:42781 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:21:11,487 DEBUG [M:0;2dff3a36d44f:42781 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:21:11,487 DEBUG [M:0;2dff3a36d44f:42781 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T11:21:11,487 DEBUG [M:0;2dff3a36d44f:42781 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:21:11,487 INFO [M:0;2dff3a36d44f:42781 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=83.40 KB heapSize=102.70 KB 2024-12-09T11:21:11,509 DEBUG [M:0;2dff3a36d44f:42781 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3783a7cda84046699f364946ab742be8 is 82, key is hbase:meta,,1/info:regioninfo/1733743248741/Put/seqid=0 2024-12-09T11:21:11,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741945_1125 (size=6063) 2024-12-09T11:21:11,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741945_1125 (size=6063) 2024-12-09T11:21:11,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741945_1125 (size=6063) 2024-12-09T11:21:11,531 INFO [M:0;2dff3a36d44f:42781 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1008 B at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3783a7cda84046699f364946ab742be8 2024-12-09T11:21:11,532 INFO [RS:0;2dff3a36d44f:46259 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:21:11,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:21:11,532 INFO [RS:0;2dff3a36d44f:46259 {}] regionserver.HRegionServer(1031): Exiting; stopping=2dff3a36d44f,46259,1733743228656; zookeeper connection closed. 
2024-12-09T11:21:11,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46259-0x1012ae9bf670001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:21:11,532 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7eb9c4f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7eb9c4f 2024-12-09T11:21:11,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:21:11,558 INFO [RS:2;2dff3a36d44f:46367 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:21:11,558 INFO [RS:2;2dff3a36d44f:46367 {}] regionserver.HRegionServer(1031): Exiting; stopping=2dff3a36d44f,46367,1733743228871; zookeeper connection closed. 2024-12-09T11:21:11,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46367-0x1012ae9bf670003, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:21:11,561 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2437afec {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2437afec 2024-12-09T11:21:11,567 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-09T11:21:11,569 DEBUG [M:0;2dff3a36d44f:42781 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dae0ca7677de4ad18758d36e55eb5b0e is 1076, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733743245408/Put/seqid=0 2024-12-09T11:21:11,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741946_1126 (size=7907) 2024-12-09T11:21:11,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741946_1126 (size=7907) 2024-12-09T11:21:11,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741946_1126 (size=7907) 2024-12-09T11:21:11,601 INFO [M:0;2dff3a36d44f:42781 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=82.17 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dae0ca7677de4ad18758d36e55eb5b0e 2024-12-09T11:21:11,607 INFO [M:0;2dff3a36d44f:42781 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for dae0ca7677de4ad18758d36e55eb5b0e 2024-12-09T11:21:11,630 DEBUG [M:0;2dff3a36d44f:42781 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1a3676ea44f549ed877e3c00b47f99f0 is 69, key is 2dff3a36d44f,46259,1733743228656/rs:state/1733743230086/Put/seqid=0 2024-12-09T11:21:11,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to 
blk_1073741947_1127 (size=5440) 2024-12-09T11:21:11,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741947_1127 (size=5440) 2024-12-09T11:21:11,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741947_1127 (size=5440) 2024-12-09T11:21:11,654 INFO [M:0;2dff3a36d44f:42781 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=249 B at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1a3676ea44f549ed877e3c00b47f99f0 2024-12-09T11:21:11,660 INFO [M:0;2dff3a36d44f:42781 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1a3676ea44f549ed877e3c00b47f99f0 2024-12-09T11:21:11,661 DEBUG [M:0;2dff3a36d44f:42781 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3783a7cda84046699f364946ab742be8 as hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3783a7cda84046699f364946ab742be8 2024-12-09T11:21:11,667 INFO [M:0;2dff3a36d44f:42781 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3783a7cda84046699f364946ab742be8, entries=14, sequenceid=207, filesize=5.9 K 2024-12-09T11:21:11,668 DEBUG [M:0;2dff3a36d44f:42781 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dae0ca7677de4ad18758d36e55eb5b0e as hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/dae0ca7677de4ad18758d36e55eb5b0e 2024-12-09T11:21:11,673 INFO [M:0;2dff3a36d44f:42781 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for dae0ca7677de4ad18758d36e55eb5b0e 2024-12-09T11:21:11,673 INFO [M:0;2dff3a36d44f:42781 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/dae0ca7677de4ad18758d36e55eb5b0e, entries=21, sequenceid=207, filesize=7.7 K 2024-12-09T11:21:11,674 DEBUG [M:0;2dff3a36d44f:42781 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1a3676ea44f549ed877e3c00b47f99f0 as hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1a3676ea44f549ed877e3c00b47f99f0 2024-12-09T11:21:11,678 INFO [M:0;2dff3a36d44f:42781 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1a3676ea44f549ed877e3c00b47f99f0 2024-12-09T11:21:11,678 INFO [M:0;2dff3a36d44f:42781 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40493/user/jenkins/test-data/c4c5cf2a-13a5-23bb-b721-7601948fa7de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1a3676ea44f549ed877e3c00b47f99f0, entries=3, sequenceid=207, filesize=5.3 K 2024-12-09T11:21:11,680 INFO [M:0;2dff3a36d44f:42781 {}] regionserver.HRegion(3140): Finished flush of dataSize ~83.40 KB/85398, heapSize ~102.41 KB/104864, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 193ms, sequenceid=207, compaction requested=false 2024-12-09T11:21:11,699 INFO [M:0;2dff3a36d44f:42781 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:21:11,699 DEBUG [M:0;2dff3a36d44f:42781 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733743271487Disabling compacts and flushes for region at 1733743271487Disabling writes for close at 1733743271487Obtaining lock to block concurrent updates at 1733743271487Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733743271488 (+1 ms)Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=85398, getHeapSize=105104, getOffHeapSize=0, getCellsCount=248 at 1733743271488Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733743271489 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733743271489Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733743271509 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733743271509Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733743271538 (+29 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733743271568 (+30 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733743271568Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733743271607 (+39 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733743271629 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733743271629Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24822cfe: reopening flushed file at 1733743271660 (+31 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3783ac8b: reopening flushed file at 1733743271667 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41a015e2: reopening flushed file at 1733743271673 (+6 ms)Finished flush of dataSize ~83.40 KB/85398, heapSize ~102.41 KB/104864, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 193ms, sequenceid=207, compaction requested=false at 1733743271680 (+7 ms)Writing region close event to WAL at 1733743271699 (+19 ms)Closed at 1733743271699 2024-12-09T11:21:11,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44093 is added to blk_1073741830_1006 (size=69545) 2024-12-09T11:21:11,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34459 is added to blk_1073741830_1006 (size=69545) 2024-12-09T11:21:11,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46359 is added to blk_1073741830_1006 (size=69545) 2024-12-09T11:21:11,704 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
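The master-store flush and LogRoller exit above, and the stream-close errors that follow, are all driven by the test-class teardown visible in the stack traces below (AbstractTestWALReplay.tearDownAfterClass -> HBaseTestingUtil.shutdownMiniCluster -> MiniDFSCluster.shutdown). A minimal sketch of that teardown path, assuming the JUnit 4 lifecycle and the HBaseTestingUtil API named in those traces (class and field names here are illustrative, not the actual test source):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;

    public class WalReplayTeardownSketch {
      // Assumption: a mini HBase cluster was started earlier (e.g. in @BeforeClass).
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @AfterClass
      public static void tearDownAfterClass() throws Exception {
        // Stops the master and region servers, then the backing MiniDFSCluster.
        // Closing the DFSClient during that shutdown also closes any WAL output
        // streams the tests left open, which is where the "Failed to close file"
        // errors below are reported.
        TEST_UTIL.shutdownMiniCluster();
      }
    }

The FileNotFoundException variants below ("does not have any open files") most likely mean the NameNode had already released its record of those WAL files, for example after lease recovery or renaming during log splitting, so the deferred close attempted at shutdown has nothing left to complete; the "stream already broken" cases are the client side of the same cleanup, where the fan-out output stream had already failed before close was called.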
2024-12-09T11:21:11,704 INFO [M:0;2dff3a36d44f:42781 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T11:21:11,704 INFO [M:0;2dff3a36d44f:42781 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42781 2024-12-09T11:21:11,704 INFO [M:0;2dff3a36d44f:42781 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T11:21:11,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:21:11,806 INFO [M:0;2dff3a36d44f:42781 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:21:11,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42781-0x1012ae9bf670000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:21:11,815 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684/wal.1733743260052 with renewLeaseKey: DEFAULT_16688 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684/wal.1733743260052 (inode 16688) Holder DFSClient_NONMAPREDUCE_-1633493794_22 does not have any open files. at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733743250684/wal.1733743260052 (inode 16688) Holder DFSClient_NONMAPREDUCE_-1633493794_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-12-09T11:21:11,818 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526/wal.1733743250453 with renewLeaseKey: DEFAULT_16665 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526/wal.1733743250453 (inode 16665) Holder DFSClient_NONMAPREDUCE_-1633493794_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733743249526/wal.1733743250453 (inode 16665) Holder DFSClient_NONMAPREDUCE_-1633493794_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-12-09T11:21:11,819 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733743240333/wal.1733743240452 with renewLeaseKey: DEFAULT_16586 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:21:11,822 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314/wal.1733743260851 with renewLeaseKey: DEFAULT_16714 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314/wal.1733743260851 (inode 16714) Holder DFSClient_NONMAPREDUCE_-1633493794_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733743260314/wal.1733743260851 (inode 16714) Holder DFSClient_NONMAPREDUCE_-1633493794_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-12-09T11:21:11,823 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733743261026/wal.1733743261526 with renewLeaseKey: DEFAULT_16736 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:21:11,823 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733743266468/wal.1733743266707 with renewLeaseKey: DEFAULT_16777 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:21:11,823 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733743232595/wal.1733743232680 with renewLeaseKey: DEFAULT_16506 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:21:11,825 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal.1733743232383 with renewLeaseKey: DEFAULT_16485 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal.1733743232383 (inode 16485) Holder DFSClient_NONMAPREDUCE_-1633493794_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733743232182/wal.1733743232383 (inode 16485) Holder DFSClient_NONMAPREDUCE_-1633493794_22 does not have any open files. 
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
	at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
	at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?]
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
	at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	... 25 more
2024-12-09T11:21:11,828 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal.1733743231905 with renewLeaseKey: DEFAULT_16462
java.io.FileNotFoundException: File does not exist: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal.1733743231905 (inode 16462) Holder DFSClient_NONMAPREDUCE_-1633493794_22 does not have any open files.
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
	at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
	at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
	at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
	at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
	at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733743231211/wal.1733743231905 (inode 16462) Holder DFSClient_NONMAPREDUCE_-1633493794_22 does not have any open files.
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
	at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
	at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?]
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
	at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	... 25 more
2024-12-09T11:21:11,845 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35f1150e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T11:21:11,848 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13a77e13{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T11:21:11,848 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T11:21:11,848 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f9972d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T11:21:11,849 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a6d5e13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/hadoop.log.dir/,STOPPED}
2024-12-09T11:21:11,852 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T11:21:11,852 WARN [BP-32692473-172.17.0.3-1733743223895 heartbeating to localhost/127.0.0.1:40493 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T11:21:11,852 WARN [BP-32692473-172.17.0.3-1733743223895 heartbeating to localhost/127.0.0.1:40493 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-32692473-172.17.0.3-1733743223895 (Datanode Uuid 44c08323-bdb9-45d4-9f31-9a5f0d6767d1) service to localhost/127.0.0.1:40493
2024-12-09T11:21:11,852 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T11:21:11,853 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/data/data5/current/BP-32692473-172.17.0.3-1733743223895 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T11:21:11,853 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/data/data6/current/BP-32692473-172.17.0.3-1733743223895 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T11:21:11,854 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T11:21:11,858 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7bd427b8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T11:21:11,858 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6915083f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T11:21:11,858 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T11:21:11,858 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cc2d6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T11:21:11,858 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46b092e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/hadoop.log.dir/,STOPPED}
2024-12-09T11:21:11,860 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T11:21:11,860 WARN [BP-32692473-172.17.0.3-1733743223895 heartbeating to localhost/127.0.0.1:40493 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T11:21:11,860 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T11:21:11,860 WARN [BP-32692473-172.17.0.3-1733743223895 heartbeating to localhost/127.0.0.1:40493 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-32692473-172.17.0.3-1733743223895 (Datanode Uuid db252fff-4d65-475d-a955-c1284fc1dd58) service to localhost/127.0.0.1:40493
2024-12-09T11:21:11,861 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/data/data3/current/BP-32692473-172.17.0.3-1733743223895 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T11:21:11,861 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/data/data4/current/BP-32692473-172.17.0.3-1733743223895 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T11:21:11,861 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T11:21:11,864 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@330740de{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T11:21:11,865 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7b24cab9{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T11:21:11,865 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T11:21:11,865 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a359997{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T11:21:11,865 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@cf5a85e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/hadoop.log.dir/,STOPPED}
2024-12-09T11:21:11,866 WARN [BP-32692473-172.17.0.3-1733743223895 heartbeating to localhost/127.0.0.1:40493 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T11:21:11,866 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T11:21:11,867 WARN [BP-32692473-172.17.0.3-1733743223895 heartbeating to localhost/127.0.0.1:40493 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-32692473-172.17.0.3-1733743223895 (Datanode Uuid 4ec6cf5b-f9e4-4fa5-9dd8-0357ae5c8876) service to localhost/127.0.0.1:40493
2024-12-09T11:21:11,867 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T11:21:11,867 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/data/data1/current/BP-32692473-172.17.0.3-1733743223895 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T11:21:11,867 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/cluster_b02fd66c-c8dc-38fe-f31f-89876c0daa74/data/data2/current/BP-32692473-172.17.0.3-1733743223895 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T11:21:11,868 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T11:21:11,876 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3717288f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T11:21:11,877 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4bd70930{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T11:21:11,877 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T11:21:11,877 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6dc9d5c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T11:21:11,877 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f37ffca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f55ad7-7a3c-1b73-1576-60631f599784/hadoop.log.dir/,STOPPED}
2024-12-09T11:21:11,888 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-09T11:21:11,943 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
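Note (editor): the "does not have any open files" RemoteExceptions above surface during the class-level JUnit teardown, when shutting down the MiniDFSCluster makes DFSClient try to close WAL output streams whose NameNode leases are already gone. For readers tracing this back to the test code, the following is a minimal sketch of that teardown shape, inferred only from the frames in the traces (AbstractTestWALReplay.tearDownAfterClass calling HBaseTestingUtil.shutdownMiniCluster); the class name, the TEST_UTIL field name, and the setup method are assumptions, not taken from this log.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    // Hypothetical stand-in for the real test class; only the two calls that
    // appear in the stack traces above are grounded in this log.
    public class WALReplayTeardownSketch {

      // Assumed field name; holds the mini-cluster utility used by the test.
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUpBeforeClass() throws Exception {
        // Assumption: the test starts a mini HBase cluster (backed by a MiniDFSCluster).
        TEST_UTIL.startMiniCluster();
      }

      @AfterClass
      public static void tearDownAfterClass() throws Exception {
        // This is the call visible in the traces: it stops HBase, then the
        // MiniDFSCluster; while the DFS client closes, any WAL output streams it
        // still tracks are completed again, which is where the lease errors appear.
        TEST_UTIL.shutdownMiniCluster();
      }
    }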