2024-12-10 16:34:16,590 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a
2024-12-10 16:34:16,605 main DEBUG Took 0.012675 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-10 16:34:16,606 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-10 16:34:16,606 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-10 16:34:16,608 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-10 16:34:16,609 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 16:34:16,618 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-10 16:34:16,633 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 16:34:16,635 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 16:34:16,636 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 16:34:16,636 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 16:34:16,637 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 16:34:16,637 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 16:34:16,638 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 16:34:16,639 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 16:34:16,639 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 16:34:16,640 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 16:34:16,641 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 16:34:16,641 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 16:34:16,642 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 16:34:16,642 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 16:34:16,643 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 16:34:16,643 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 16:34:16,643 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 16:34:16,644 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 16:34:16,644 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 16:34:16,644 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 16:34:16,645 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 16:34:16,645 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 16:34:16,646 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 16:34:16,646 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 16:34:16,647 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 16:34:16,647 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-10 16:34:16,649 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 16:34:16,650 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-10 16:34:16,652 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-10 16:34:16,653 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-10 16:34:16,654 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-10 16:34:16,655 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-10 16:34:16,663 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-10 16:34:16,666 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-10 16:34:16,668 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-10 16:34:16,668 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-10 16:34:16,669 main DEBUG createAppenders(={Console})
2024-12-10 16:34:16,670 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a initialized
2024-12-10 16:34:16,670 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a
2024-12-10 16:34:16,670 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a OK.
2024-12-10 16:34:16,671 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-10 16:34:16,671 main DEBUG OutputStream closed
2024-12-10 16:34:16,672 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-10 16:34:16,672 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-10 16:34:16,672 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@4310d43 OK
2024-12-10 16:34:16,752 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-10 16:34:16,754 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-10 16:34:16,756 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-10 16:34:16,757 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-10 16:34:16,758 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-10 16:34:16,758 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-10 16:34:16,759 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-10 16:34:16,759 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-10 16:34:16,760 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-10 16:34:16,760 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-10 16:34:16,761 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-10 16:34:16,761 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-10 16:34:16,761 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-10 16:34:16,762 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-10 16:34:16,762 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-10 16:34:16,762 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-10 16:34:16,763 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-10 16:34:16,764 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-10 16:34:16,766 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-10 16:34:16,767 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@12f9af83) with optional ClassLoader: null
2024-12-10 16:34:16,767 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-10 16:34:16,768 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@12f9af83] started OK.
2024-12-10T16:34:16,991 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1
2024-12-10 16:34:16,994 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-10 16:34:16,995 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
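The DEBUG lines above record Log4j 2 assembling the test logging configuration from the log4j2.properties bundled in hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar: one per-package LoggerConfig per builder call, a root logger of INFO routed to the Console appender, and an HBaseTestAppender writing to SYSTEM_ERR with the ISO8601 pattern layout. In log4j2 properties form that corresponds roughly to the sketch below; the logger names, levels and pattern are taken from the builder output, but the exact property keys and the trimmed logger list are assumptions, not quoted from the real file.

appender.console.type = HBaseTestAppender
appender.console.name = Console
appender.console.target = SYSTEM_ERR
appender.console.maxSize = 1G
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

rootLogger = INFO,Console

# a few of the per-package levels built above (subset; key names assumed)
logger.hadoop.name = org.apache.hadoop
logger.hadoop.level = WARN
logger.hbase.name = org.apache.hadoop.hbase
logger.hbase.level = DEBUG
logger.zookeeper.name = org.apache.zookeeper
logger.zookeeper.level = ERROR
logger.directory.name = org.apache.directory
logger.directory.level = WARN
logger.directory.additivity = false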
2024-12-10T16:34:17,003 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay timeout: 13 mins
2024-12-10T16:34:17,011 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplayValueCompression timeout: 13 mins
2024-12-10T16:34:17,033 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-10T16:34:17,073 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false
2024-12-10T16:34:17,073 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512
2024-12-10T16:34:17,085 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-10T16:34:17,097 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1, deleteOnExit=true
2024-12-10T16:34:17,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-10T16:34:17,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/test.cache.data in system properties and HBase conf
2024-12-10T16:34:17,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/hadoop.tmp.dir in system properties and HBase conf
2024-12-10T16:34:17,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/hadoop.log.dir in system properties and HBase conf
2024-12-10T16:34:17,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-10T16:34:17,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-10T16:34:17,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-10T16:34:17,177 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-10T16:34:17,268 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-10T16:34:17,273 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-10T16:34:17,274 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-10T16:34:17,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-10T16:34:17,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-10T16:34:17,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-10T16:34:17,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-10T16:34:17,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-10T16:34:17,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-10T16:34:17,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-10T16:34:17,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/nfs.dump.dir in system properties and HBase conf
2024-12-10T16:34:17,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/java.io.tmpdir in system properties and HBase conf
2024-12-10T16:34:17,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-10T16:34:17,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-10T16:34:17,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-10T16:34:18,148 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-10T16:34:18,217 INFO [Time-limited test {}] log.Log(170): Logging initialized @2224ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-10T16:34:18,282 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-10T16:34:18,336 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-10T16:34:18,353 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-10T16:34:18,353 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-10T16:34:18,354 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-10T16:34:18,371 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-10T16:34:18,374 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f37ffca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/hadoop.log.dir/,AVAILABLE}
2024-12-10T16:34:18,375 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6dc9d5c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-10T16:34:18,535 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3717288f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/java.io.tmpdir/jetty-localhost-40915-hadoop-hdfs-3_4_1-tests_jar-_-any-6295331493781622908/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-10T16:34:18,541 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4bd70930{HTTP/1.1, (http/1.1)}{localhost:40915}
2024-12-10T16:34:18,541 INFO [Time-limited test {}] server.Server(415): Started @2549ms
2024-12-10T16:34:19,009 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-10T16:34:19,015 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-10T16:34:19,016 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-10T16:34:19,016 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-10T16:34:19,016 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-10T16:34:19,017 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@cf5a85e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/hadoop.log.dir/,AVAILABLE}
2024-12-10T16:34:19,018 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a359997{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-10T16:34:19,110 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@330740de{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/java.io.tmpdir/jetty-localhost-36321-hadoop-hdfs-3_4_1-tests_jar-_-any-6659444077718309875/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T16:34:19,111 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7b24cab9{HTTP/1.1, (http/1.1)}{localhost:36321}
2024-12-10T16:34:19,111 INFO [Time-limited test {}] server.Server(415): Started @3119ms
2024-12-10T16:34:19,154 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-10T16:34:19,257 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-10T16:34:19,264 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-10T16:34:19,265 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-10T16:34:19,265 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-10T16:34:19,265 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-10T16:34:19,269 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46b092e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/hadoop.log.dir/,AVAILABLE}
2024-12-10T16:34:19,269 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cc2d6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-10T16:34:19,391 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7bd427b8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/java.io.tmpdir/jetty-localhost-45391-hadoop-hdfs-3_4_1-tests_jar-_-any-7229055517162201575/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T16:34:19,392 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6915083f{HTTP/1.1, (http/1.1)}{localhost:45391}
2024-12-10T16:34:19,392 INFO [Time-limited test {}] server.Server(415): Started @3400ms
2024-12-10T16:34:19,394 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-10T16:34:19,439 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-10T16:34:19,444 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-10T16:34:19,446 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-10T16:34:19,446 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-10T16:34:19,446 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-10T16:34:19,447 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a6d5e13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/hadoop.log.dir/,AVAILABLE}
2024-12-10T16:34:19,448 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f9972d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-10T16:34:19,550 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35f1150e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/java.io.tmpdir/jetty-localhost-45537-hadoop-hdfs-3_4_1-tests_jar-_-any-17177302552868257435/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T16:34:19,551 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13a77e13{HTTP/1.1, (http/1.1)}{localhost:45537}
2024-12-10T16:34:19,551 INFO [Time-limited test {}] server.Server(415): Started @3559ms
2024-12-10T16:34:19,553 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
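At 16:34:17,085 HBaseTestingUtil(805) reports the minicluster layout being started: 1 master, 3 region servers, 3 data nodes and 1 ZooKeeper server, and the Jetty lines above are the embedded HDFS namenode and three datanode web UIs coming up. A test drives this through the HBaseTestingUtil / StartMiniClusterOption API roughly as in the minimal sketch below; this is not the actual TestAsyncWALReplay code, and the class name MiniClusterSketch is invented for illustration.

// MiniClusterSketch.java - minimal sketch of starting the same cluster shape,
// assuming the org.apache.hadoop.hbase.HBaseTestingUtil / StartMiniClusterOption API.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // Owns the temporary test directories seen in the log
    // (hbase.rootdir, the cluster_* data dir, embedded HDFS and ZooKeeper).
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=3,
    // numDataNodes=3, numZkServers=1, ...} printed by HBaseTestingUtil(805).
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .build();

    util.startMiniCluster(option);   // starts DFS, ZooKeeper, the master and the region servers
    try {
      // ... exercise the cluster, e.g. via util.getConnection() / util.getAdmin() ...
    } finally {
      util.shutdownMiniCluster();    // tears everything down and removes the test dirs
    }
  }
}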
2024-12-10T16:34:20,262 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/data/data3/current/BP-1758511473-172.17.0.3-1733848457790/current, will proceed with Du for space computation calculation,
2024-12-10T16:34:20,262 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/data/data4/current/BP-1758511473-172.17.0.3-1733848457790/current, will proceed with Du for space computation calculation,
2024-12-10T16:34:20,262 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/data/data1/current/BP-1758511473-172.17.0.3-1733848457790/current, will proceed with Du for space computation calculation,
2024-12-10T16:34:20,262 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/data/data2/current/BP-1758511473-172.17.0.3-1733848457790/current, will proceed with Du for space computation calculation,
2024-12-10T16:34:20,280 WARN [Thread-132 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/data/data5/current/BP-1758511473-172.17.0.3-1733848457790/current, will proceed with Du for space computation calculation,
2024-12-10T16:34:20,285 WARN [Thread-133 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/data/data6/current/BP-1758511473-172.17.0.3-1733848457790/current, will proceed with Du for space computation calculation,
2024-12-10T16:34:20,295 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-10T16:34:20,296 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-10T16:34:20,309 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-10T16:34:20,340 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xde1512bdcf4d665f with lease ID 0x230a5048ca44103: Processing first storage report for DS-c05f9a42-583c-4f58-bba4-77e13705c0bc from datanode DatanodeRegistration(127.0.0.1:42039, datanodeUuid=75c93e06-6591-4732-9326-3e4c05ccc6c9, infoPort=44417, infoSecurePort=0, ipcPort=45691, storageInfo=lv=-57;cid=testClusterID;nsid=914309492;c=1733848457790)
2024-12-10T16:34:20,341 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xde1512bdcf4d665f with lease ID 0x230a5048ca44103: from storage DS-c05f9a42-583c-4f58-bba4-77e13705c0bc node DatanodeRegistration(127.0.0.1:42039, datanodeUuid=75c93e06-6591-4732-9326-3e4c05ccc6c9, infoPort=44417, infoSecurePort=0, ipcPort=45691, storageInfo=lv=-57;cid=testClusterID;nsid=914309492;c=1733848457790), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-10T16:34:20,342 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x542255b46a2d47a9 with lease ID 0x230a5048ca44104: Processing first storage report for DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023 from datanode DatanodeRegistration(127.0.0.1:46873, datanodeUuid=017dd4e6-ac3c-4432-a7b1-b0f7f2dba114, infoPort=42633, infoSecurePort=0, ipcPort=45921, storageInfo=lv=-57;cid=testClusterID;nsid=914309492;c=1733848457790)
2024-12-10T16:34:20,342 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x542255b46a2d47a9 with lease ID 0x230a5048ca44104: from storage DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023 node DatanodeRegistration(127.0.0.1:46873, datanodeUuid=017dd4e6-ac3c-4432-a7b1-b0f7f2dba114, infoPort=42633, infoSecurePort=0, ipcPort=45921, storageInfo=lv=-57;cid=testClusterID;nsid=914309492;c=1733848457790), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-10T16:34:20,342 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcf68d6b37a177e9c with lease ID 0x230a5048ca44105: Processing first storage report for DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09 from datanode DatanodeRegistration(127.0.0.1:43913, datanodeUuid=532bd787-74b5-4b08-b35e-f1360f5ce582, infoPort=36795, infoSecurePort=0, ipcPort=46845, storageInfo=lv=-57;cid=testClusterID;nsid=914309492;c=1733848457790)
2024-12-10T16:34:20,342 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcf68d6b37a177e9c with lease ID 0x230a5048ca44105: from storage DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09 node DatanodeRegistration(127.0.0.1:43913, datanodeUuid=532bd787-74b5-4b08-b35e-f1360f5ce582, infoPort=36795, infoSecurePort=0, ipcPort=46845, storageInfo=lv=-57;cid=testClusterID;nsid=914309492;c=1733848457790), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-10T16:34:20,342 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xde1512bdcf4d665f with lease ID 0x230a5048ca44103: Processing first storage report for DS-db2a3e97-c8d0-4582-a060-d3e61dcfe9b6 from datanode DatanodeRegistration(127.0.0.1:42039, datanodeUuid=75c93e06-6591-4732-9326-3e4c05ccc6c9, infoPort=44417, infoSecurePort=0, ipcPort=45691, storageInfo=lv=-57;cid=testClusterID;nsid=914309492;c=1733848457790)
2024-12-10T16:34:20,343 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xde1512bdcf4d665f with lease ID 0x230a5048ca44103: from storage DS-db2a3e97-c8d0-4582-a060-d3e61dcfe9b6 node DatanodeRegistration(127.0.0.1:42039, datanodeUuid=75c93e06-6591-4732-9326-3e4c05ccc6c9, infoPort=44417, infoSecurePort=0, ipcPort=45691, storageInfo=lv=-57;cid=testClusterID;nsid=914309492;c=1733848457790), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-10T16:34:20,343 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x542255b46a2d47a9 with lease ID 0x230a5048ca44104: Processing first storage report for DS-472f01e7-8b85-4323-8bfd-831b417bb610 from datanode DatanodeRegistration(127.0.0.1:46873, datanodeUuid=017dd4e6-ac3c-4432-a7b1-b0f7f2dba114, infoPort=42633, infoSecurePort=0, ipcPort=45921, storageInfo=lv=-57;cid=testClusterID;nsid=914309492;c=1733848457790)
2024-12-10T16:34:20,343 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x542255b46a2d47a9 with lease ID 0x230a5048ca44104: from storage DS-472f01e7-8b85-4323-8bfd-831b417bb610 node DatanodeRegistration(127.0.0.1:46873, datanodeUuid=017dd4e6-ac3c-4432-a7b1-b0f7f2dba114, infoPort=42633, infoSecurePort=0, ipcPort=45921, storageInfo=lv=-57;cid=testClusterID;nsid=914309492;c=1733848457790), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-10T16:34:20,343 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcf68d6b37a177e9c with lease ID 0x230a5048ca44105: Processing first storage report for DS-1aa72d58-65ec-4203-8d53-341829b6e382 from datanode DatanodeRegistration(127.0.0.1:43913, datanodeUuid=532bd787-74b5-4b08-b35e-f1360f5ce582, infoPort=36795, infoSecurePort=0, ipcPort=46845, storageInfo=lv=-57;cid=testClusterID;nsid=914309492;c=1733848457790)
2024-12-10T16:34:20,343 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcf68d6b37a177e9c with lease ID 0x230a5048ca44105: from storage DS-1aa72d58-65ec-4203-8d53-341829b6e382 node DatanodeRegistration(127.0.0.1:43913, datanodeUuid=532bd787-74b5-4b08-b35e-f1360f5ce582, infoPort=36795, infoSecurePort=0, ipcPort=46845, storageInfo=lv=-57;cid=testClusterID;nsid=914309492;c=1733848457790), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-10T16:34:20,357 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1
2024-12-10T16:34:20,418 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/zookeeper_0, clientPort=53765, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-10T16:34:20,426 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53765
2024-12-10T16:34:20,435 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T16:34:20,437 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T16:34:20,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741825_1001 (size=7)
2024-12-10T16:34:20,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741825_1001 (size=7)
2024-12-10T16:34:20,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741825_1001 (size=7)
2024-12-10T16:34:21,032 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f with version=8
2024-12-10T16:34:21,032 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/hbase-staging
2024-12-10T16:34:21,294 INFO [Time-limited test {}] client.ConnectionUtils(128): master/4b7737f37de9:0 server-side Connection retries=45
2024-12-10T16:34:21,303 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-10T16:34:21,303 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-10T16:34:21,307 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-10T16:34:21,308 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-10T16:34:21,308 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-10T16:34:21,425 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-10T16:34:21,481 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-10T16:34:21,489 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-10T16:34:21,493 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-10T16:34:21,516 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 28224 (auto-detected)
2024-12-10T16:34:21,517 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected)
2024-12-10T16:34:21,537 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42829
2024-12-10T16:34:21,559 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42829 connecting to ZooKeeper ensemble=127.0.0.1:53765
2024-12-10T16:34:21,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:428290x0, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-10T16:34:21,646 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42829-0x10010af1c860000 connected
2024-12-10T16:34:21,724 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T16:34:21,731 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T16:34:21,744 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-10T16:34:21,748 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f, hbase.cluster.distributed=false
2024-12-10T16:34:21,769 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-10T16:34:21,773 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42829
2024-12-10T16:34:21,773 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42829
2024-12-10T16:34:21,774 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42829
2024-12-10T16:34:21,774 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42829
2024-12-10T16:34:21,775 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42829
2024-12-10T16:34:21,861 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4b7737f37de9:0 server-side Connection retries=45
2024-12-10T16:34:21,863 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-10T16:34:21,863 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-10T16:34:21,863 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-10T16:34:21,863 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-10T16:34:21,863 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-10T16:34:21,866 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-10T16:34:21,868 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-10T16:34:21,869 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:35753
2024-12-10T16:34:21,870 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35753 connecting to ZooKeeper ensemble=127.0.0.1:53765
2024-12-10T16:34:21,871 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T16:34:21,873 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T16:34:21,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:357530x0, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-10T16:34:21,887 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:357530x0, quorum=127.0.0.1:53765, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-10T16:34:21,887 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35753-0x10010af1c860001 connected
2024-12-10T16:34:21,892 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-10T16:34:21,899 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-10T16:34:21,902 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35753-0x10010af1c860001, quorum=127.0.0.1:53765, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-10T16:34:21,907 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35753-0x10010af1c860001, quorum=127.0.0.1:53765, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-10T16:34:21,907 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35753
2024-12-10T16:34:21,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35753
2024-12-10T16:34:21,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35753
2024-12-10T16:34:21,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35753
2024-12-10T16:34:21,909 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35753
2024-12-10T16:34:21,925 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4b7737f37de9:0 server-side Connection retries=45
2024-12-10T16:34:21,925 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-10T16:34:21,926 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-10T16:34:21,926 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-10T16:34:21,926 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-10T16:34:21,926 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-10T16:34:21,927 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-10T16:34:21,927 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-10T16:34:21,928 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40043
2024-12-10T16:34:21,930 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40043 connecting to ZooKeeper ensemble=127.0.0.1:53765
2024-12-10T16:34:21,931 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T16:34:21,934 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T16:34:21,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:400430x0, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-10T16:34:21,947 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:400430x0, quorum=127.0.0.1:53765, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-10T16:34:21,947 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40043-0x10010af1c860002 connected
2024-12-10T16:34:21,948 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-10T16:34:21,949 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-10T16:34:21,950 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40043-0x10010af1c860002, quorum=127.0.0.1:53765, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-10T16:34:21,951 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40043-0x10010af1c860002, quorum=127.0.0.1:53765, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-10T16:34:21,952 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40043
2024-12-10T16:34:21,952 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40043
2024-12-10T16:34:21,952 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40043
2024-12-10T16:34:21,953 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40043
2024-12-10T16:34:21,953 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40043
2024-12-10T16:34:21,969 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4b7737f37de9:0 server-side Connection retries=45
2024-12-10T16:34:21,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-10T16:34:21,970 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-10T16:34:21,970 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-10T16:34:21,970 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-10T16:34:21,970 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-10T16:34:21,970 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-10T16:34:21,971 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-10T16:34:21,972 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44673
2024-12-10T16:34:21,973 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44673 connecting to ZooKeeper ensemble=127.0.0.1:53765
2024-12-10T16:34:21,974 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T16:34:21,977 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T16:34:21,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:446730x0, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-10T16:34:21,989 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-10T16:34:21,989 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44673-0x10010af1c860003 connected
2024-12-10T16:34:21,989 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-10T16:34:21,990 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-10T16:34:21,992 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-10T16:34:21,994 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-10T16:34:21,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44673
2024-12-10T16:34:21,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44673
2024-12-10T16:34:21,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44673
2024-12-10T16:34:21,997 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44673
2024-12-10T16:34:21,997 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44673
2024-12-10T16:34:22,012 DEBUG [M:0;4b7737f37de9:42829 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;4b7737f37de9:42829
2024-12-10T16:34:22,013 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/4b7737f37de9,42829,1733848461149
2024-12-10T16:34:22,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40043-0x10010af1c860002, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-10T16:34:22,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-10T16:34:22,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x10010af1c860001, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-10T16:34:22,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-10T16:34:22,030 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/4b7737f37de9,42829,1733848461149
2024-12-10T16:34:22,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x10010af1c860001, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-10T16:34:22,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-10T16:34:22,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40043-0x10010af1c860002, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-10T16:34:22,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-10T16:34:22,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-10T16:34:22,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x10010af1c860001, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-10T16:34:22,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40043-0x10010af1c860002, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-10T16:34:22,056 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-10T16:34:22,057 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/4b7737f37de9,42829,1733848461149 from backup master directory
2024-12-10T16:34:22,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x10010af1c860001, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-10T16:34:22,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40043-0x10010af1c860002, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-10T16:34:22,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/4b7737f37de9,42829,1733848461149
2024-12-10T16:34:22,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-10T16:34:22,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event,
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T16:34:22,065 WARN [master/4b7737f37de9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T16:34:22,065 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=4b7737f37de9,42829,1733848461149 2024-12-10T16:34:22,069 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-10T16:34:22,070 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-10T16:34:22,123 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/hbase.id] with ID: 95428ff9-68ba-4f3a-a203-efee9a6f1c7c 2024-12-10T16:34:22,123 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/.tmp/hbase.id 2024-12-10T16:34:22,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741826_1002 (size=42) 2024-12-10T16:34:22,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741826_1002 (size=42) 2024-12-10T16:34:22,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741826_1002 (size=42) 2024-12-10T16:34:22,137 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/.tmp/hbase.id]:[hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/hbase.id] 2024-12-10T16:34:22,181 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T16:34:22,187 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-10T16:34:22,206 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 
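The FSUtils entries above show the cluster ID file being created in two steps: the ID is first written to a temporary path (.tmp/hbase.id) and only then moved to its final location, so a reader never observes a half-written file. A minimal sketch of that write-then-rename pattern, using java.nio.file on a local path and an invented directory purely for illustration (HBase itself does this through the Hadoop FileSystem API on HDFS):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    public final class ClusterIdFileSketch {
      /** Write content to a temporary sibling file first, then move it onto the target path. */
      static void writeAtomically(Path target, String content) throws IOException {
        Path tmp = target.resolveSibling(target.getFileName() + ".tmp");
        Files.createDirectories(target.getParent());
        Files.write(tmp, content.getBytes(StandardCharsets.UTF_8));
        // The rename is the commit point: readers see either no file or the complete new one.
        Files.move(tmp, target, StandardCopyOption.ATOMIC_MOVE);
      }

      public static void main(String[] args) throws IOException {
        // Hypothetical local stand-in for hdfs://.../hbase.id
        writeAtomically(Path.of("/tmp/hbase-demo/hbase.id"), "95428ff9-68ba-4f3a-a203-efee9a6f1c7c");
      }
    }

If the process dies before the rename, only the .tmp file is left behind and the target path is untouched, which is why the log reports the temporary write and the move as separate steps.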
2024-12-10T16:34:22,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:22,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40043-0x10010af1c860002, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:22,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:22,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x10010af1c860001, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:22,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741827_1003 (size=196) 2024-12-10T16:34:22,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741827_1003 (size=196) 2024-12-10T16:34:22,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741827_1003 (size=196) 2024-12-10T16:34:22,249 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T16:34:22,250 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-10T16:34:22,256 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:22,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 
is added to blk_1073741828_1004 (size=1189) 2024-12-10T16:34:22,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741828_1004 (size=1189) 2024-12-10T16:34:22,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741828_1004 (size=1189) 2024-12-10T16:34:22,300 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/data/master/store 2024-12-10T16:34:22,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741829_1005 (size=34) 2024-12-10T16:34:22,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741829_1005 (size=34) 2024-12-10T16:34:22,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741829_1005 (size=34) 2024-12-10T16:34:22,325 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
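The MasterRegion entries above print the 'master:store' descriptor with its four column families (info, proc, rs, state) and their per-family settings (VERSIONS, IN_MEMORY, DATA_BLOCK_ENCODING, BLOOMFILTER, BLOCKSIZE). As a rough illustration only, a descriptor with approximately the 'info' family's settings could be declared through the public client builders as below; this is a sketch, not the internal code path MasterRegion uses to build its local store table:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        // Roughly mirrors the 'info' family settings printed above
        // (VERSIONS=3, IN_MEMORY=true, ROW_INDEX_V1 encoding, ROWCOL bloom filter, 8 KB blocks).
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .build();

        TableDescriptor store = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .build();

        System.out.println(store);
      }
    }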
2024-12-10T16:34:22,328 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:22,329 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T16:34:22,330 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T16:34:22,330 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T16:34:22,331 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-10T16:34:22,332 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T16:34:22,332 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T16:34:22,333 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733848462329Disabling compacts and flushes for region at 1733848462329Disabling writes for close at 1733848462331 (+2 ms)Writing region close event to WAL at 1733848462332 (+1 ms)Closed at 1733848462332 2024-12-10T16:34:22,335 WARN [master/4b7737f37de9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/data/master/store/.initializing 2024-12-10T16:34:22,335 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/WALs/4b7737f37de9,42829,1733848461149 2024-12-10T16:34:22,343 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T16:34:22,358 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4b7737f37de9%2C42829%2C1733848461149, suffix=, logDir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/WALs/4b7737f37de9,42829,1733848461149, archiveDir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/oldWALs, maxLogs=10 2024-12-10T16:34:22,387 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/WALs/4b7737f37de9,42829,1733848461149/4b7737f37de9%2C42829%2C1733848461149.1733848462362, exclude list is [], retry=0 2024-12-10T16:34:22,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T16:34:22,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK]
2024-12-10T16:34:22,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK]
2024-12-10T16:34:22,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK]
2024-12-10T16:34:22,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf.
2024-12-10T16:34:22,450 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/WALs/4b7737f37de9,42829,1733848461149/4b7737f37de9%2C42829%2C1733848461149.1733848462362
2024-12-10T16:34:22,451 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:42633:42633)]
2024-12-10T16:34:22,451 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-10T16:34:22,452 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-10T16:34:22,454 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-10T16:34:22,455 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-10T16:34:22,492 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-10T16:34:22,514 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB,
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-10T16:34:22,517 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:22,520 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T16:34:22,521 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T16:34:22,524 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-10T16:34:22,524 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:22,525 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:22,525 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T16:34:22,528 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; 
major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-10T16:34:22,528 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:22,529 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:22,529 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T16:34:22,532 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-10T16:34:22,532 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:22,533 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:22,533 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T16:34:22,536 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T16:34:22,538 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T16:34:22,542 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay 
for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T16:34:22,543 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T16:34:22,546 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T16:34:22,549 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T16:34:22,553 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T16:34:22,554 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59297946, jitterRate=-0.11639174818992615}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T16:34:22,559 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733848462469Initializing all the Stores at 1733848462471 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733848462472 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848462473 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848462473Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848462473Cleaning up temporary data from old regions at 1733848462543 (+70 ms)Region opened successfully at 1733848462559 (+16 ms) 2024-12-10T16:34:22,560 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-10T16:34:22,590 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@463513f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4b7737f37de9/172.17.0.3:0 2024-12-10T16:34:22,617 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-10T16:34:22,626 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-10T16:34:22,626 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-10T16:34:22,629 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-10T16:34:22,630 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-10T16:34:22,635 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-10T16:34:22,635 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-10T16:34:22,657 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-10T16:34:22,664 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-10T16:34:22,705 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-10T16:34:22,707 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-10T16:34:22,710 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-10T16:34:22,719 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-10T16:34:22,722 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-10T16:34:22,726 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-10T16:34:22,787 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-10T16:34:22,791 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Unable to get 
data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-10T16:34:22,923 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-10T16:34:22,946 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-10T16:34:22,954 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-10T16:34:22,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x10010af1c860001, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T16:34:22,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40043-0x10010af1c860002, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T16:34:22,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T16:34:22,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40043-0x10010af1c860002, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:22,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T16:34:22,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:22,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:22,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x10010af1c860001, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:22,968 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=4b7737f37de9,42829,1733848461149, sessionid=0x10010af1c860000, setting cluster-up flag (Was=false) 2024-12-10T16:34:22,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40043-0x10010af1c860002, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:22,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x10010af1c860001, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:22,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:22,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:23,022 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-10T16:34:23,026 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4b7737f37de9,42829,1733848461149 2024-12-10T16:34:23,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x10010af1c860001, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:23,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:23,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40043-0x10010af1c860002, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:23,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:23,071 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-10T16:34:23,073 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4b7737f37de9,42829,1733848461149 2024-12-10T16:34:23,082 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-10T16:34:23,101 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.HRegionServer(746): ClusterId : 95428ff9-68ba-4f3a-a203-efee9a6f1c7c 2024-12-10T16:34:23,101 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.HRegionServer(746): ClusterId : 95428ff9-68ba-4f3a-a203-efee9a6f1c7c 2024-12-10T16:34:23,101 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.HRegionServer(746): ClusterId : 95428ff9-68ba-4f3a-a203-efee9a6f1c7c 2024-12-10T16:34:23,104 DEBUG [RS:2;4b7737f37de9:44673 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T16:34:23,104 DEBUG [RS:1;4b7737f37de9:40043 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T16:34:23,104 DEBUG [RS:0;4b7737f37de9:35753 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T16:34:23,115 DEBUG [RS:0;4b7737f37de9:35753 {}] procedure.RegionServerProcedureManagerHost(45): Procedure 
flush-table-proc initialized 2024-12-10T16:34:23,115 DEBUG [RS:1;4b7737f37de9:40043 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T16:34:23,115 DEBUG [RS:2;4b7737f37de9:44673 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T16:34:23,115 DEBUG [RS:0;4b7737f37de9:35753 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T16:34:23,115 DEBUG [RS:2;4b7737f37de9:44673 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T16:34:23,115 DEBUG [RS:1;4b7737f37de9:40043 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T16:34:23,123 DEBUG [RS:2;4b7737f37de9:44673 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T16:34:23,123 DEBUG [RS:1;4b7737f37de9:40043 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T16:34:23,123 DEBUG [RS:0;4b7737f37de9:35753 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T16:34:23,123 DEBUG [RS:2;4b7737f37de9:44673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f8bd0a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4b7737f37de9/172.17.0.3:0 2024-12-10T16:34:23,123 DEBUG [RS:1;4b7737f37de9:40043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@565d3fa8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4b7737f37de9/172.17.0.3:0 2024-12-10T16:34:23,123 DEBUG [RS:0;4b7737f37de9:35753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3df3273d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4b7737f37de9/172.17.0.3:0 2024-12-10T16:34:23,138 DEBUG [RS:2;4b7737f37de9:44673 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;4b7737f37de9:44673 2024-12-10T16:34:23,141 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T16:34:23,141 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T16:34:23,141 DEBUG [RS:2;4b7737f37de9:44673 {}] regionserver.HRegionServer(832): About to register with Master. 
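The ShutdownHook entries around this point record each server installing a named JVM shutdown hook so that cleanup still runs when the process is stopped. A minimal sketch of the underlying JVM mechanism, with an invented hook name and cleanup body, might look like:

    public final class ShutdownHookSketch {
      public static void main(String[] args) throws InterruptedException {
        // Register a named hook; the JVM runs it on normal exit or on SIGTERM/SIGINT.
        Thread hook = new Thread(() -> {
          // Hypothetical cleanup: flush state, close connections, delete ephemeral znodes, etc.
          System.out.println("shutdown hook running: releasing resources");
        }, "Shutdownhook:RS:0;demo-host:12345");
        Runtime.getRuntime().addShutdownHook(hook);

        System.out.println("server running; press Ctrl+C or let main return");
        Thread.sleep(1000); // stand-in for the server's main loop
      }
    }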
2024-12-10T16:34:23,141 DEBUG [RS:0;4b7737f37de9:35753 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;4b7737f37de9:35753 2024-12-10T16:34:23,141 DEBUG [RS:1;4b7737f37de9:40043 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;4b7737f37de9:40043 2024-12-10T16:34:23,142 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T16:34:23,142 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T16:34:23,142 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T16:34:23,142 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T16:34:23,142 DEBUG [RS:0;4b7737f37de9:35753 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-10T16:34:23,142 DEBUG [RS:1;4b7737f37de9:40043 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-10T16:34:23,144 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.HRegionServer(2659): reportForDuty to master=4b7737f37de9,42829,1733848461149 with port=44673, startcode=1733848461969 2024-12-10T16:34:23,144 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.HRegionServer(2659): reportForDuty to master=4b7737f37de9,42829,1733848461149 with port=35753, startcode=1733848461832 2024-12-10T16:34:23,144 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.HRegionServer(2659): reportForDuty to master=4b7737f37de9,42829,1733848461149 with port=40043, startcode=1733848461924 2024-12-10T16:34:23,155 DEBUG [RS:2;4b7737f37de9:44673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T16:34:23,155 DEBUG [RS:1;4b7737f37de9:40043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T16:34:23,155 DEBUG [RS:0;4b7737f37de9:35753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T16:34:23,163 INFO [AsyncFSWAL-0-hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData-prefix:4b7737f37de9,42829,1733848461149 {}] compress.Compression(560): Loaded codec org.apache.hadoop.hbase.io.compress.ReusableStreamGzipCodec for compression algorithm GZ 2024-12-10T16:34:23,185 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-10T16:34:23,187 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58017, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T16:34:23,187 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42999, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T16:34:23,187 INFO [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42353, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T16:34:23,192 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42829 {}] ipc.MetricsHBaseServer(152): Unknown 
exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-12-10T16:34:23,196 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2
2024-12-10T16:34:23,197 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42829 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-12-10T16:34:23,198 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42829 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-12-10T16:34:23,204 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-12-10T16:34:23,209 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 4b7737f37de9,42829,1733848461149 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-12-10T16:34:23,215 DEBUG [RS:1;4b7737f37de9:40043 {}] regionserver.HRegionServer(2683): Master is not running yet
2024-12-10T16:34:23,215 DEBUG [RS:2;4b7737f37de9:44673 {}] regionserver.HRegionServer(2683): Master is not running yet
2024-12-10T16:34:23,215 DEBUG [RS:0;4b7737f37de9:35753 {}] regionserver.HRegionServer(2683): Master is not running yet
2024-12-10T16:34:23,215 WARN [RS:1;4b7737f37de9:40043 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying.
2024-12-10T16:34:23,215 WARN [RS:2;4b7737f37de9:44673 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying.
2024-12-10T16:34:23,215 WARN [RS:0;4b7737f37de9:35753 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying.
2024-12-10T16:34:23,215 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/4b7737f37de9:0, corePoolSize=5, maxPoolSize=5 2024-12-10T16:34:23,216 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/4b7737f37de9:0, corePoolSize=5, maxPoolSize=5 2024-12-10T16:34:23,216 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/4b7737f37de9:0, corePoolSize=5, maxPoolSize=5 2024-12-10T16:34:23,216 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/4b7737f37de9:0, corePoolSize=5, maxPoolSize=5 2024-12-10T16:34:23,216 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/4b7737f37de9:0, corePoolSize=10, maxPoolSize=10 2024-12-10T16:34:23,216 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,216 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/4b7737f37de9:0, corePoolSize=2, maxPoolSize=2 2024-12-10T16:34:23,216 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,217 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733848493217 2024-12-10T16:34:23,219 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-10T16:34:23,220 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-10T16:34:23,221 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T16:34:23,222 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-10T16:34:23,223 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-10T16:34:23,223 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-10T16:34:23,223 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-10T16:34:23,223 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-10T16:34:23,224 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-10T16:34:23,227 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-10T16:34:23,227 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:23,228 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-10T16:34:23,228 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-10T16:34:23,228 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-10T16:34:23,230 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-10T16:34:23,230 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-10T16:34:23,232 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/4b7737f37de9:0:becomeActiveMaster-HFileCleaner.large.0-1733848463231,5,FailOnTimeoutGroup] 2024-12-10T16:34:23,233 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/4b7737f37de9:0:becomeActiveMaster-HFileCleaner.small.0-1733848463232,5,FailOnTimeoutGroup] 2024-12-10T16:34:23,233 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,233 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-10T16:34:23,234 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,234 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741831_1007 (size=1321) 2024-12-10T16:34:23,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741831_1007 (size=1321) 2024-12-10T16:34:23,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741831_1007 (size=1321) 2024-12-10T16:34:23,275 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-10T16:34:23,276 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f 2024-12-10T16:34:23,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741832_1008 (size=32) 2024-12-10T16:34:23,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741832_1008 (size=32) 2024-12-10T16:34:23,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to 
blk_1073741832_1008 (size=32) 2024-12-10T16:34:23,292 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:23,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T16:34:23,297 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T16:34:23,297 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:23,298 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T16:34:23,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-10T16:34:23,301 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-10T16:34:23,301 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:23,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T16:34:23,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T16:34:23,304 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T16:34:23,305 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:23,306 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T16:34:23,306 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T16:34:23,308 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T16:34:23,308 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:23,309 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T16:34:23,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-10T16:34:23,311 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740 2024-12-10T16:34:23,312 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740 2024-12-10T16:34:23,314 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-10T16:34:23,315 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-10T16:34:23,316 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T16:34:23,316 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.HRegionServer(2659): reportForDuty to master=4b7737f37de9,42829,1733848461149 with port=44673, startcode=1733848461969 2024-12-10T16:34:23,316 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.HRegionServer(2659): reportForDuty to master=4b7737f37de9,42829,1733848461149 with port=40043, startcode=1733848461924 2024-12-10T16:34:23,316 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.HRegionServer(2659): reportForDuty to master=4b7737f37de9,42829,1733848461149 with port=35753, startcode=1733848461832 2024-12-10T16:34:23,318 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42829 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4b7737f37de9,40043,1733848461924 2024-12-10T16:34:23,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-10T16:34:23,322 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42829 {}] master.ServerManager(517): Registering regionserver=4b7737f37de9,40043,1733848461924 2024-12-10T16:34:23,322 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T16:34:23,323 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64545650, jitterRate=-0.038194864988327026}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T16:34:23,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733848463292Initializing all the Stores at 1733848463294 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733848463294Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733848463294Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848463294Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733848463294Cleaning up temporary data from old regions at 1733848463315 (+21 ms)Region opened successfully at 1733848463325 (+10 ms) 2024-12-10T16:34:23,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-10T16:34:23,326 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-10T16:34:23,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-10T16:34:23,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T16:34:23,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T16:34:23,328 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-10T16:34:23,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733848463326Disabling compacts and flushes for region at 1733848463326Disabling writes for close at 1733848463326Writing region close event to WAL at 1733848463327 (+1 ms)Closed at 1733848463328 (+1 ms) 2024-12-10T16:34:23,331 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42829 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4b7737f37de9,44673,1733848461969 2024-12-10T16:34:23,331 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T16:34:23,331 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-10T16:34:23,331 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42829 {}] master.ServerManager(517): Registering regionserver=4b7737f37de9,44673,1733848461969 2024-12-10T16:34:23,331 DEBUG [RS:1;4b7737f37de9:40043 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f 2024-12-10T16:34:23,331 DEBUG [RS:1;4b7737f37de9:40043 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35477 2024-12-10T16:34:23,332 DEBUG [RS:1;4b7737f37de9:40043 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T16:34:23,335 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42829 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4b7737f37de9,35753,1733848461832 2024-12-10T16:34:23,335 DEBUG [RS:2;4b7737f37de9:44673 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f 2024-12-10T16:34:23,335 DEBUG [RS:2;4b7737f37de9:44673 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35477 2024-12-10T16:34:23,335 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42829 {}] master.ServerManager(517): Registering regionserver=4b7737f37de9,35753,1733848461832 2024-12-10T16:34:23,335 DEBUG [RS:2;4b7737f37de9:44673 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T16:34:23,338 INFO [PEWorker-1 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-10T16:34:23,339 DEBUG [RS:0;4b7737f37de9:35753 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f 2024-12-10T16:34:23,339 DEBUG [RS:0;4b7737f37de9:35753 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35477 2024-12-10T16:34:23,339 DEBUG [RS:0;4b7737f37de9:35753 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T16:34:23,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T16:34:23,346 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-10T16:34:23,349 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-10T16:34:23,369 DEBUG [RS:1;4b7737f37de9:40043 {}] zookeeper.ZKUtil(111): regionserver:40043-0x10010af1c860002, quorum=127.0.0.1:53765, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4b7737f37de9,40043,1733848461924 2024-12-10T16:34:23,369 WARN [RS:1;4b7737f37de9:40043 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T16:34:23,369 INFO [RS:1;4b7737f37de9:40043 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:23,369 DEBUG [RS:2;4b7737f37de9:44673 {}] zookeeper.ZKUtil(111): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4b7737f37de9,44673,1733848461969 2024-12-10T16:34:23,369 DEBUG [RS:0;4b7737f37de9:35753 {}] zookeeper.ZKUtil(111): regionserver:35753-0x10010af1c860001, quorum=127.0.0.1:53765, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4b7737f37de9,35753,1733848461832 2024-12-10T16:34:23,369 DEBUG [RS:1;4b7737f37de9:40043 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,40043,1733848461924 2024-12-10T16:34:23,369 WARN [RS:2;4b7737f37de9:44673 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T16:34:23,369 WARN [RS:0;4b7737f37de9:35753 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-10T16:34:23,370 INFO [RS:0;4b7737f37de9:35753 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:23,370 INFO [RS:2;4b7737f37de9:44673 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:23,370 DEBUG [RS:0;4b7737f37de9:35753 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,35753,1733848461832 2024-12-10T16:34:23,370 DEBUG [RS:2;4b7737f37de9:44673 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,44673,1733848461969 2024-12-10T16:34:23,371 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4b7737f37de9,40043,1733848461924] 2024-12-10T16:34:23,372 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4b7737f37de9,35753,1733848461832] 2024-12-10T16:34:23,372 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4b7737f37de9,44673,1733848461969] 2024-12-10T16:34:23,398 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T16:34:23,398 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T16:34:23,398 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T16:34:23,410 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T16:34:23,410 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T16:34:23,410 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T16:34:23,415 INFO [RS:2;4b7737f37de9:44673 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T16:34:23,415 INFO [RS:1;4b7737f37de9:40043 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T16:34:23,415 INFO [RS:0;4b7737f37de9:35753 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T16:34:23,415 INFO [RS:1;4b7737f37de9:40043 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,415 INFO [RS:2;4b7737f37de9:44673 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-10T16:34:23,415 INFO [RS:0;4b7737f37de9:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,416 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T16:34:23,416 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T16:34:23,416 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T16:34:23,421 INFO [RS:2;4b7737f37de9:44673 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T16:34:23,421 INFO [RS:1;4b7737f37de9:40043 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T16:34:23,421 INFO [RS:0;4b7737f37de9:35753 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T16:34:23,423 INFO [RS:1;4b7737f37de9:40043 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,423 INFO [RS:0;4b7737f37de9:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,423 INFO [RS:2;4b7737f37de9:44673 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,423 DEBUG [RS:1;4b7737f37de9:40043 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,423 DEBUG [RS:0;4b7737f37de9:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,423 DEBUG [RS:2;4b7737f37de9:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,424 DEBUG [RS:1;4b7737f37de9:40043 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,424 DEBUG [RS:0;4b7737f37de9:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,424 DEBUG [RS:1;4b7737f37de9:40043 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,424 DEBUG [RS:2;4b7737f37de9:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,424 DEBUG [RS:1;4b7737f37de9:40043 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,424 DEBUG [RS:0;4b7737f37de9:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,424 DEBUG [RS:2;4b7737f37de9:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,424 DEBUG 
[RS:1;4b7737f37de9:40043 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,424 DEBUG [RS:0;4b7737f37de9:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,424 DEBUG [RS:1;4b7737f37de9:40043 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4b7737f37de9:0, corePoolSize=2, maxPoolSize=2 2024-12-10T16:34:23,424 DEBUG [RS:2;4b7737f37de9:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,424 DEBUG [RS:1;4b7737f37de9:40043 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,424 DEBUG [RS:0;4b7737f37de9:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,424 DEBUG [RS:2;4b7737f37de9:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,424 DEBUG [RS:1;4b7737f37de9:40043 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,424 DEBUG [RS:0;4b7737f37de9:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4b7737f37de9:0, corePoolSize=2, maxPoolSize=2 2024-12-10T16:34:23,424 DEBUG [RS:2;4b7737f37de9:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4b7737f37de9:0, corePoolSize=2, maxPoolSize=2 2024-12-10T16:34:23,424 DEBUG [RS:1;4b7737f37de9:40043 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,424 DEBUG [RS:0;4b7737f37de9:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,424 DEBUG [RS:1;4b7737f37de9:40043 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,424 DEBUG [RS:2;4b7737f37de9:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,424 DEBUG [RS:1;4b7737f37de9:40043 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,425 DEBUG [RS:0;4b7737f37de9:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,425 DEBUG [RS:2;4b7737f37de9:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,425 DEBUG [RS:1;4b7737f37de9:40043 {}] executor.ExecutorService(95): Starting executor service 
name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,425 DEBUG [RS:0;4b7737f37de9:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,425 DEBUG [RS:1;4b7737f37de9:40043 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4b7737f37de9:0, corePoolSize=3, maxPoolSize=3 2024-12-10T16:34:23,425 DEBUG [RS:2;4b7737f37de9:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,425 DEBUG [RS:1;4b7737f37de9:40043 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4b7737f37de9:0, corePoolSize=3, maxPoolSize=3 2024-12-10T16:34:23,425 DEBUG [RS:0;4b7737f37de9:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,425 DEBUG [RS:2;4b7737f37de9:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,425 DEBUG [RS:0;4b7737f37de9:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,425 DEBUG [RS:2;4b7737f37de9:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,425 DEBUG [RS:0;4b7737f37de9:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,425 DEBUG [RS:2;4b7737f37de9:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4b7737f37de9:0, corePoolSize=1, maxPoolSize=1 2024-12-10T16:34:23,425 DEBUG [RS:0;4b7737f37de9:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4b7737f37de9:0, corePoolSize=3, maxPoolSize=3 2024-12-10T16:34:23,425 DEBUG [RS:2;4b7737f37de9:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4b7737f37de9:0, corePoolSize=3, maxPoolSize=3 2024-12-10T16:34:23,425 DEBUG [RS:0;4b7737f37de9:35753 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4b7737f37de9:0, corePoolSize=3, maxPoolSize=3 2024-12-10T16:34:23,425 DEBUG [RS:2;4b7737f37de9:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4b7737f37de9:0, corePoolSize=3, maxPoolSize=3 2024-12-10T16:34:23,427 INFO [RS:1;4b7737f37de9:40043 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,427 INFO [RS:0;4b7737f37de9:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,427 INFO [RS:2;4b7737f37de9:44673 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-10T16:34:23,427 INFO [RS:0;4b7737f37de9:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,427 INFO [RS:1;4b7737f37de9:40043 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,427 INFO [RS:2;4b7737f37de9:44673 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,427 INFO [RS:0;4b7737f37de9:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,427 INFO [RS:1;4b7737f37de9:40043 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,427 INFO [RS:2;4b7737f37de9:44673 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,427 INFO [RS:0;4b7737f37de9:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,427 INFO [RS:1;4b7737f37de9:40043 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,428 INFO [RS:2;4b7737f37de9:44673 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,428 INFO [RS:0;4b7737f37de9:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,428 INFO [RS:1;4b7737f37de9:40043 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,428 INFO [RS:2;4b7737f37de9:44673 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,428 INFO [RS:1;4b7737f37de9:40043 {}] hbase.ChoreService(168): Chore ScheduledChore name=4b7737f37de9,40043,1733848461924-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T16:34:23,428 INFO [RS:0;4b7737f37de9:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=4b7737f37de9,35753,1733848461832-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T16:34:23,428 INFO [RS:2;4b7737f37de9:44673 {}] hbase.ChoreService(168): Chore ScheduledChore name=4b7737f37de9,44673,1733848461969-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T16:34:23,445 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T16:34:23,445 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T16:34:23,447 INFO [RS:0;4b7737f37de9:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=4b7737f37de9,35753,1733848461832-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,447 INFO [RS:2;4b7737f37de9:44673 {}] hbase.ChoreService(168): Chore ScheduledChore name=4b7737f37de9,44673,1733848461969-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,448 INFO [RS:0;4b7737f37de9:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-10T16:34:23,448 INFO [RS:2;4b7737f37de9:44673 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,448 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.Replication(171): 4b7737f37de9,35753,1733848461832 started 2024-12-10T16:34:23,448 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.Replication(171): 4b7737f37de9,44673,1733848461969 started 2024-12-10T16:34:23,448 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T16:34:23,448 INFO [RS:1;4b7737f37de9:40043 {}] hbase.ChoreService(168): Chore ScheduledChore name=4b7737f37de9,40043,1733848461924-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,448 INFO [RS:1;4b7737f37de9:40043 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,449 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.Replication(171): 4b7737f37de9,40043,1733848461924 started 2024-12-10T16:34:23,465 INFO [RS:0;4b7737f37de9:35753 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,465 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.HRegionServer(1482): Serving as 4b7737f37de9,35753,1733848461832, RpcServer on 4b7737f37de9/172.17.0.3:35753, sessionid=0x10010af1c860001 2024-12-10T16:34:23,466 DEBUG [RS:0;4b7737f37de9:35753 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T16:34:23,466 DEBUG [RS:0;4b7737f37de9:35753 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4b7737f37de9,35753,1733848461832 2024-12-10T16:34:23,466 DEBUG [RS:0;4b7737f37de9:35753 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4b7737f37de9,35753,1733848461832' 2024-12-10T16:34:23,467 DEBUG [RS:0;4b7737f37de9:35753 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T16:34:23,468 DEBUG [RS:0;4b7737f37de9:35753 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T16:34:23,468 DEBUG [RS:0;4b7737f37de9:35753 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T16:34:23,468 DEBUG [RS:0;4b7737f37de9:35753 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T16:34:23,469 DEBUG [RS:0;4b7737f37de9:35753 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4b7737f37de9,35753,1733848461832 2024-12-10T16:34:23,469 DEBUG [RS:0;4b7737f37de9:35753 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4b7737f37de9,35753,1733848461832' 2024-12-10T16:34:23,469 DEBUG [RS:0;4b7737f37de9:35753 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T16:34:23,469 DEBUG [RS:0;4b7737f37de9:35753 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T16:34:23,470 DEBUG [RS:0;4b7737f37de9:35753 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T16:34:23,470 INFO [RS:0;4b7737f37de9:35753 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T16:34:23,470 INFO 
[RS:0;4b7737f37de9:35753 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T16:34:23,470 INFO [RS:1;4b7737f37de9:40043 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,470 INFO [RS:2;4b7737f37de9:44673 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:23,471 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.HRegionServer(1482): Serving as 4b7737f37de9,44673,1733848461969, RpcServer on 4b7737f37de9/172.17.0.3:44673, sessionid=0x10010af1c860003 2024-12-10T16:34:23,471 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.HRegionServer(1482): Serving as 4b7737f37de9,40043,1733848461924, RpcServer on 4b7737f37de9/172.17.0.3:40043, sessionid=0x10010af1c860002 2024-12-10T16:34:23,471 DEBUG [RS:2;4b7737f37de9:44673 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T16:34:23,471 DEBUG [RS:1;4b7737f37de9:40043 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T16:34:23,471 DEBUG [RS:1;4b7737f37de9:40043 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4b7737f37de9,40043,1733848461924 2024-12-10T16:34:23,471 DEBUG [RS:2;4b7737f37de9:44673 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4b7737f37de9,44673,1733848461969 2024-12-10T16:34:23,471 DEBUG [RS:1;4b7737f37de9:40043 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4b7737f37de9,40043,1733848461924' 2024-12-10T16:34:23,471 DEBUG [RS:2;4b7737f37de9:44673 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4b7737f37de9,44673,1733848461969' 2024-12-10T16:34:23,471 DEBUG [RS:1;4b7737f37de9:40043 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T16:34:23,471 DEBUG [RS:2;4b7737f37de9:44673 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T16:34:23,472 DEBUG [RS:1;4b7737f37de9:40043 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T16:34:23,472 DEBUG [RS:2;4b7737f37de9:44673 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T16:34:23,473 DEBUG [RS:1;4b7737f37de9:40043 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T16:34:23,473 DEBUG [RS:1;4b7737f37de9:40043 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T16:34:23,473 DEBUG [RS:1;4b7737f37de9:40043 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4b7737f37de9,40043,1733848461924 2024-12-10T16:34:23,473 DEBUG [RS:2;4b7737f37de9:44673 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T16:34:23,473 DEBUG [RS:1;4b7737f37de9:40043 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4b7737f37de9,40043,1733848461924' 2024-12-10T16:34:23,473 DEBUG [RS:2;4b7737f37de9:44673 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T16:34:23,473 DEBUG [RS:1;4b7737f37de9:40043 {}] procedure.ZKProcedureMemberRpcs(134): 
Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T16:34:23,473 DEBUG [RS:2;4b7737f37de9:44673 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4b7737f37de9,44673,1733848461969 2024-12-10T16:34:23,473 DEBUG [RS:2;4b7737f37de9:44673 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4b7737f37de9,44673,1733848461969' 2024-12-10T16:34:23,473 DEBUG [RS:2;4b7737f37de9:44673 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T16:34:23,473 DEBUG [RS:1;4b7737f37de9:40043 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T16:34:23,473 DEBUG [RS:2;4b7737f37de9:44673 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T16:34:23,474 DEBUG [RS:1;4b7737f37de9:40043 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T16:34:23,474 DEBUG [RS:2;4b7737f37de9:44673 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T16:34:23,474 INFO [RS:1;4b7737f37de9:40043 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T16:34:23,474 INFO [RS:2;4b7737f37de9:44673 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T16:34:23,474 INFO [RS:1;4b7737f37de9:40043 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T16:34:23,474 INFO [RS:2;4b7737f37de9:44673 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T16:34:23,500 WARN [4b7737f37de9:42829 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-10T16:34:23,575 INFO [RS:1;4b7737f37de9:40043 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T16:34:23,575 INFO [RS:2;4b7737f37de9:44673 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T16:34:23,575 INFO [RS:0;4b7737f37de9:35753 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T16:34:23,578 INFO [RS:2;4b7737f37de9:44673 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4b7737f37de9%2C44673%2C1733848461969, suffix=, logDir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,44673,1733848461969, archiveDir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/oldWALs, maxLogs=32 2024-12-10T16:34:23,578 INFO [RS:1;4b7737f37de9:40043 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4b7737f37de9%2C40043%2C1733848461924, suffix=, logDir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,40043,1733848461924, archiveDir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/oldWALs, maxLogs=32 2024-12-10T16:34:23,578 INFO [RS:0;4b7737f37de9:35753 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4b7737f37de9%2C35753%2C1733848461832, suffix=, logDir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,35753,1733848461832, archiveDir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/oldWALs, maxLogs=32 2024-12-10T16:34:23,594 DEBUG [RS:2;4b7737f37de9:44673 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,44673,1733848461969/4b7737f37de9%2C44673%2C1733848461969.1733848463581, exclude list is [], retry=0 2024-12-10T16:34:23,599 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:23,599 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:23,599 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:23,600 DEBUG [RS:1;4b7737f37de9:40043 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,40043,1733848461924/4b7737f37de9%2C40043%2C1733848461924.1733848463581, exclude list is [], retry=0 2024-12-10T16:34:23,601 DEBUG [RS:0;4b7737f37de9:35753 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,35753,1733848461832/4b7737f37de9%2C35753%2C1733848461832.1733848463581, exclude list is [], retry=0 2024-12-10T16:34:23,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:23,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:23,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:23,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:23,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:23,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:23,609 INFO [RS:2;4b7737f37de9:44673 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,44673,1733848461969/4b7737f37de9%2C44673%2C1733848461969.1733848463581 2024-12-10T16:34:23,611 DEBUG [RS:2;4b7737f37de9:44673 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:44417:44417)] 2024-12-10T16:34:23,611 INFO [RS:1;4b7737f37de9:40043 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,40043,1733848461924/4b7737f37de9%2C40043%2C1733848461924.1733848463581 2024-12-10T16:34:23,613 INFO [RS:0;4b7737f37de9:35753 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,35753,1733848461832/4b7737f37de9%2C35753%2C1733848461832.1733848463581 2024-12-10T16:34:23,613 DEBUG [RS:1;4b7737f37de9:40043 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:44417:44417)] 2024-12-10T16:34:23,613 DEBUG [RS:0;4b7737f37de9:35753 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:36795:36795)] 
2024-12-10T16:34:23,753 DEBUG [4b7737f37de9:42829 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-10T16:34:23,763 DEBUG [4b7737f37de9:42829 {}] balancer.BalancerClusterState(204): Hosts are {4b7737f37de9=0} racks are {/default-rack=0} 2024-12-10T16:34:23,769 DEBUG [4b7737f37de9:42829 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-10T16:34:23,769 DEBUG [4b7737f37de9:42829 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-10T16:34:23,769 DEBUG [4b7737f37de9:42829 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-10T16:34:23,769 DEBUG [4b7737f37de9:42829 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-10T16:34:23,769 DEBUG [4b7737f37de9:42829 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-10T16:34:23,769 DEBUG [4b7737f37de9:42829 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-10T16:34:23,769 INFO [4b7737f37de9:42829 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-10T16:34:23,769 INFO [4b7737f37de9:42829 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-10T16:34:23,769 INFO [4b7737f37de9:42829 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-10T16:34:23,769 DEBUG [4b7737f37de9:42829 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T16:34:23,776 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=4b7737f37de9,40043,1733848461924 2024-12-10T16:34:23,780 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4b7737f37de9,40043,1733848461924, state=OPENING 2024-12-10T16:34:23,813 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-10T16:34:23,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40043-0x10010af1c860002, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:23,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:23,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x10010af1c860001, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:23,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:23,824 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T16:34:23,824 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T16:34:23,824 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T16:34:23,825 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): 
Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T16:34:23,827 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-10T16:34:23,829 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=4b7737f37de9,40043,1733848461924}] 2024-12-10T16:34:24,010 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T16:34:24,013 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34771, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T16:34:24,023 INFO [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-10T16:34:24,024 INFO [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:24,024 INFO [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-10T16:34:24,028 INFO [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4b7737f37de9%2C40043%2C1733848461924.meta, suffix=.meta, logDir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,40043,1733848461924, archiveDir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/oldWALs, maxLogs=32 2024-12-10T16:34:24,043 DEBUG [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,40043,1733848461924/4b7737f37de9%2C40043%2C1733848461924.meta.1733848464030.meta, exclude list is [], retry=0 2024-12-10T16:34:24,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:24,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:24,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:24,051 INFO [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,40043,1733848461924/4b7737f37de9%2C40043%2C1733848461924.meta.1733848464030.meta 2024-12-10T16:34:24,052 DEBUG [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:44417:44417)] 2024-12-10T16:34:24,052 DEBUG [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:24,053 DEBUG [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-10T16:34:24,055 DEBUG [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-10T16:34:24,060 INFO [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-10T16:34:24,064 DEBUG [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-10T16:34:24,065 DEBUG [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:24,065 DEBUG [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-10T16:34:24,065 DEBUG [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-10T16:34:24,068 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T16:34:24,070 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T16:34:24,070 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:24,071 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T16:34:24,071 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-10T16:34:24,072 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-10T16:34:24,072 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:24,073 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T16:34:24,073 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T16:34:24,074 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T16:34:24,075 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:24,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T16:34:24,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T16:34:24,076 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T16:34:24,077 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:24,077 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T16:34:24,078 DEBUG [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-10T16:34:24,079 DEBUG [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740 2024-12-10T16:34:24,082 DEBUG [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740 2024-12-10T16:34:24,085 DEBUG [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-10T16:34:24,085 DEBUG [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-10T16:34:24,086 DEBUG [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-10T16:34:24,089 DEBUG [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-10T16:34:24,091 INFO [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61654820, jitterRate=-0.08127158880233765}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T16:34:24,091 DEBUG [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-10T16:34:24,093 DEBUG [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733848464066Writing region info on filesystem at 1733848464066Initializing all the Stores at 1733848464068 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733848464068Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733848464068Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848464068Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733848464068Cleaning up temporary data from old regions at 1733848464085 (+17 ms)Running coprocessor post-open hooks at 1733848464091 (+6 ms)Region opened successfully at 1733848464093 (+2 ms) 2024-12-10T16:34:24,100 INFO [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733848464002 2024-12-10T16:34:24,112 DEBUG [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-10T16:34:24,113 INFO [RS_OPEN_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-10T16:34:24,114 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=4b7737f37de9,40043,1733848461924 2024-12-10T16:34:24,116 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4b7737f37de9,40043,1733848461924, state=OPEN 2024-12-10T16:34:24,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40043-0x10010af1c860002, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T16:34:24,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x10010af1c860001, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T16:34:24,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T16:34:24,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T16:34:24,127 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T16:34:24,127 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T16:34:24,127 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T16:34:24,127 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T16:34:24,128 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=4b7737f37de9,40043,1733848461924 2024-12-10T16:34:24,134 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-10T16:34:24,135 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=4b7737f37de9,40043,1733848461924 in 299 msec 2024-12-10T16:34:24,141 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-10T16:34:24,141 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 798 msec 2024-12-10T16:34:24,143 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T16:34:24,143 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-10T16:34:24,163 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-10T16:34:24,164 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=4b7737f37de9,40043,1733848461924, seqNum=-1] 2024-12-10T16:34:24,183 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T16:34:24,186 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39641, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T16:34:24,205 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0920 sec 2024-12-10T16:34:24,206 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733848464205, completionTime=-1 2024-12-10T16:34:24,209 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-10T16:34:24,209 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-10T16:34:24,239 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-10T16:34:24,239 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733848524239 2024-12-10T16:34:24,239 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733848584239 2024-12-10T16:34:24,239 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 29 msec 2024-12-10T16:34:24,241 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-10T16:34:24,248 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4b7737f37de9,42829,1733848461149-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:24,248 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4b7737f37de9,42829,1733848461149-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:24,248 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4b7737f37de9,42829,1733848461149-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:24,250 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-4b7737f37de9:42829, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:24,250 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:24,251 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-12-10T16:34:24,257 DEBUG [master/4b7737f37de9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-10T16:34:24,282 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.216sec 2024-12-10T16:34:24,283 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-10T16:34:24,285 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-10T16:34:24,285 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-10T16:34:24,286 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-10T16:34:24,286 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-10T16:34:24,286 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4b7737f37de9,42829,1733848461149-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T16:34:24,287 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4b7737f37de9,42829,1733848461149-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-10T16:34:24,317 DEBUG [master/4b7737f37de9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-10T16:34:24,318 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-10T16:34:24,318 INFO [master/4b7737f37de9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4b7737f37de9,42829,1733848461149-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-10T16:34:24,325 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d6e3190, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T16:34:24,326 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 4b7737f37de9,42829,-1 for getting cluster id 2024-12-10T16:34:24,329 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-10T16:34:24,343 DEBUG [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '95428ff9-68ba-4f3a-a203-efee9a6f1c7c' 2024-12-10T16:34:24,345 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-10T16:34:24,345 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "95428ff9-68ba-4f3a-a203-efee9a6f1c7c" 2024-12-10T16:34:24,346 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@318817cd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T16:34:24,346 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4b7737f37de9,42829,-1] 2024-12-10T16:34:24,348 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-10T16:34:24,350 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T16:34:24,351 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57756, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-10T16:34:24,354 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f808dbb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T16:34:24,355 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-10T16:34:24,361 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4b7737f37de9,40043,1733848461924, seqNum=-1] 2024-12-10T16:34:24,362 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T16:34:24,364 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51350, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T16:34:24,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=4b7737f37de9,42829,1733848461149 2024-12-10T16:34:24,383 INFO [Time-limited test {}] wal.AbstractTestWALReplay(147): hbase.rootdir=hdfs://localhost:35477/hbase 2024-12-10T16:34:24,394 INFO [Time-limited test 
{}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit0 Thread=365, OpenFileDescriptor=607, MaxFileDescriptor=1048576, SystemLoadAverage=190, ProcessCount=11, AvailableMemoryMB=5288 2024-12-10T16:34:24,416 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:24,419 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:24,420 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T16:34:24,425 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-84429569, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/hregion-84429569, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:24,444 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-84429569/hregion-84429569.1733848464427, exclude list is [], retry=0 2024-12-10T16:34:24,448 DEBUG [AsyncFSWAL-8-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:24,448 DEBUG [AsyncFSWAL-8-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:24,448 DEBUG [AsyncFSWAL-8-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:24,451 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-84429569/hregion-84429569.1733848464427 2024-12-10T16:34:24,452 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:42633:42633)] 2024-12-10T16:34:24,452 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 0a88f42ff3cd74daeeadd0f738983cb1, NAME => 'testReplayEditsWrittenIntoWAL,,1733848464417.0a88f42ff3cd74daeeadd0f738983cb1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35477/hbase 2024-12-10T16:34:24,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741838_1014 (size=64) 2024-12-10T16:34:24,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741838_1014 (size=64) 2024-12-10T16:34:24,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is 
added to blk_1073741838_1014 (size=64) 2024-12-10T16:34:24,472 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733848464417.0a88f42ff3cd74daeeadd0f738983cb1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:24,475 INFO [StoreOpener-0a88f42ff3cd74daeeadd0f738983cb1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 0a88f42ff3cd74daeeadd0f738983cb1 2024-12-10T16:34:24,477 INFO [StoreOpener-0a88f42ff3cd74daeeadd0f738983cb1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0a88f42ff3cd74daeeadd0f738983cb1 columnFamilyName a 2024-12-10T16:34:24,477 DEBUG [StoreOpener-0a88f42ff3cd74daeeadd0f738983cb1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:24,478 INFO [StoreOpener-0a88f42ff3cd74daeeadd0f738983cb1-1 {}] regionserver.HStore(327): Store=0a88f42ff3cd74daeeadd0f738983cb1/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:24,478 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 0a88f42ff3cd74daeeadd0f738983cb1 2024-12-10T16:34:24,480 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1 2024-12-10T16:34:24,481 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1 2024-12-10T16:34:24,481 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 0a88f42ff3cd74daeeadd0f738983cb1 2024-12-10T16:34:24,482 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 0a88f42ff3cd74daeeadd0f738983cb1 2024-12-10T16:34:24,485 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 0a88f42ff3cd74daeeadd0f738983cb1 2024-12-10T16:34:24,489 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T16:34:24,490 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 0a88f42ff3cd74daeeadd0f738983cb1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75331393, 
jitterRate=0.12252523005008698}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T16:34:24,490 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 0a88f42ff3cd74daeeadd0f738983cb1: Writing region info on filesystem at 1733848464473Initializing all the Stores at 1733848464474 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848464474Cleaning up temporary data from old regions at 1733848464482 (+8 ms)Region opened successfully at 1733848464490 (+8 ms) 2024-12-10T16:34:24,491 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 0a88f42ff3cd74daeeadd0f738983cb1, disabling compactions & flushes 2024-12-10T16:34:24,491 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733848464417.0a88f42ff3cd74daeeadd0f738983cb1. 2024-12-10T16:34:24,491 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733848464417.0a88f42ff3cd74daeeadd0f738983cb1. 2024-12-10T16:34:24,491 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733848464417.0a88f42ff3cd74daeeadd0f738983cb1. after waiting 0 ms 2024-12-10T16:34:24,491 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733848464417.0a88f42ff3cd74daeeadd0f738983cb1. 2024-12-10T16:34:24,491 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733848464417.0a88f42ff3cd74daeeadd0f738983cb1. 
2024-12-10T16:34:24,491 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 0a88f42ff3cd74daeeadd0f738983cb1: Waiting for close lock at 1733848464491Disabling compacts and flushes for region at 1733848464491Disabling writes for close at 1733848464491Writing region close event to WAL at 1733848464491Closed at 1733848464491 2024-12-10T16:34:24,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741837_1013 (size=95) 2024-12-10T16:34:24,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741837_1013 (size=95) 2024-12-10T16:34:24,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741837_1013 (size=95) 2024-12-10T16:34:24,502 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T16:34:24,502 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-84429569:(num 1733848464427) 2024-12-10T16:34:24,504 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-10T16:34:24,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741839_1015 (size=320) 2024-12-10T16:34:24,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741839_1015 (size=320) 2024-12-10T16:34:24,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741839_1015 (size=320) 2024-12-10T16:34:24,523 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-10T16:34:24,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741840_1016 (size=253) 2024-12-10T16:34:24,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741840_1016 (size=253) 2024-12-10T16:34:24,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741840_1016 (size=253) 2024-12-10T16:34:24,553 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-1, size=320 (320bytes) 2024-12-10T16:34:24,553 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-10T16:34:24,553 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-10T16:34:24,553 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-1 2024-12-10T16:34:24,559 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-1 after 4ms 2024-12-10T16:34:24,565 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-1: 
isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:24,566 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-1 took 15ms 2024-12-10T16:34:24,576 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-1 so closing down 2024-12-10T16:34:24,576 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T16:34:24,579 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-12-10T16:34:24,582 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/recovered.edits/0000000000000000001-wal-1.temp 2024-12-10T16:34:24,583 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T16:34:24,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741841_1017 (size=320) 2024-12-10T16:34:24,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741841_1017 (size=320) 2024-12-10T16:34:24,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741841_1017 (size=320) 2024-12-10T16:34:24,594 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-10T16:34:24,597 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/recovered.edits/0000000000000000002 2024-12-10T16:34:24,600 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 30 ms; skipped=0; WAL=hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-1, size=320, length=320, corrupted=false, cancelled=false 2024-12-10T16:34:24,600 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-1, journal: Splitting hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-1, size=320 (320bytes) at 1733848464553Finishing writing output for hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-1 so closing down at 1733848464576 (+23 ms)Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/recovered.edits/0000000000000000001-wal-1.temp at 1733848464582 (+6 ms)3 split writer threads finished at 1733848464583 (+1 ms)Closed recovered edits writer 
path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1733848464594 (+11 ms)Rename recovered edits hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/recovered.edits/0000000000000000002 at 1733848464597 (+3 ms)Processed 2 edits across 1 Regions in 30 ms; skipped=0; WAL=hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-1, size=320, length=320, corrupted=false, cancelled=false at 1733848464600 (+3 ms) 2024-12-10T16:34:24,613 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-2, size=253 (253bytes) 2024-12-10T16:34:24,613 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-2 2024-12-10T16:34:24,614 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-2 after 1ms 2024-12-10T16:34:24,619 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-2: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:24,619 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-2 took 6ms 2024-12-10T16:34:24,622 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-2 so closing down 2024-12-10T16:34:24,622 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T16:34:24,625 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-12-10T16:34:24,626 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/recovered.edits/0000000000000000002-wal-2.temp 2024-12-10T16:34:24,627 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T16:34:24,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741842_1018 (size=253) 2024-12-10T16:34:24,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741842_1018 (size=253) 2024-12-10T16:34:24,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741842_1018 (size=253) 2024-12-10T16:34:24,636 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer 
path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-10T16:34:24,640 DEBUG [split-log-closeStream-pool-0 {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:24,643 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(184): Found existing old edits file and we have less entries. Deleting hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/recovered.edits/0000000000000000002-wal-2.temp, length=253 2024-12-10T16:34:24,644 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 24 ms; skipped=0; WAL=hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-2, size=253, length=253, corrupted=false, cancelled=false 2024-12-10T16:34:24,644 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-2, journal: Splitting hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-2, size=253 (253bytes) at 1733848464613Finishing writing output for hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-2 so closing down at 1733848464622 (+9 ms)Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/recovered.edits/0000000000000000002-wal-2.temp at 1733848464627 (+5 ms)3 split writer threads finished at 1733848464627Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733848464636 (+9 ms)Processed 1 edits across 1 Regions in 24 ms; skipped=0; WAL=hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal-2, size=253, length=253, corrupted=false, cancelled=false at 1733848464644 (+8 ms) 2024-12-10T16:34:24,645 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T16:34:24,647 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:24,660 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal.1733848464648, exclude list is [], retry=0 2024-12-10T16:34:24,664 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:24,664 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in 
unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:24,665 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:24,667 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal.1733848464648 2024-12-10T16:34:24,672 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:44417:44417)] 2024-12-10T16:34:24,672 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 0a88f42ff3cd74daeeadd0f738983cb1, NAME => 'testReplayEditsWrittenIntoWAL,,1733848464417.0a88f42ff3cd74daeeadd0f738983cb1.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:24,672 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733848464417.0a88f42ff3cd74daeeadd0f738983cb1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:24,672 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 0a88f42ff3cd74daeeadd0f738983cb1 2024-12-10T16:34:24,672 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 0a88f42ff3cd74daeeadd0f738983cb1 2024-12-10T16:34:24,675 INFO [StoreOpener-0a88f42ff3cd74daeeadd0f738983cb1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 0a88f42ff3cd74daeeadd0f738983cb1 2024-12-10T16:34:24,676 INFO [StoreOpener-0a88f42ff3cd74daeeadd0f738983cb1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0a88f42ff3cd74daeeadd0f738983cb1 columnFamilyName a 2024-12-10T16:34:24,676 DEBUG [StoreOpener-0a88f42ff3cd74daeeadd0f738983cb1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:24,678 INFO [StoreOpener-0a88f42ff3cd74daeeadd0f738983cb1-1 {}] regionserver.HStore(327): Store=0a88f42ff3cd74daeeadd0f738983cb1/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:24,678 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 0a88f42ff3cd74daeeadd0f738983cb1 2024-12-10T16:34:24,680 DEBUG [Time-limited test {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1 2024-12-10T16:34:24,684 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1 2024-12-10T16:34:24,685 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/recovered.edits/0000000000000000002 2024-12-10T16:34:24,689 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:24,694 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/recovered.edits/0000000000000000002 2024-12-10T16:34:24,698 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 0a88f42ff3cd74daeeadd0f738983cb1 1/1 column families, dataSize=108 B heapSize=512 B 2024-12-10T16:34:24,743 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/.tmp/a/39ae5b0cf72d4ddeaa5ad4f41999e50c is 58, key is testReplayEditsWrittenIntoWAL/a:1/1733848464503/Put/seqid=0 2024-12-10T16:34:24,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741844_1020 (size=5170) 2024-12-10T16:34:24,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741844_1020 (size=5170) 2024-12-10T16:34:24,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741844_1020 (size=5170) 2024-12-10T16:34:24,757 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/.tmp/a/39ae5b0cf72d4ddeaa5ad4f41999e50c 2024-12-10T16:34:24,800 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/.tmp/a/39ae5b0cf72d4ddeaa5ad4f41999e50c as hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/a/39ae5b0cf72d4ddeaa5ad4f41999e50c 2024-12-10T16:34:24,810 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/a/39ae5b0cf72d4ddeaa5ad4f41999e50c, entries=2, sequenceid=2, filesize=5.0 K 2024-12-10T16:34:24,816 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 0a88f42ff3cd74daeeadd0f738983cb1 in 117ms, sequenceid=2, compaction requested=false; 
wal=null 2024-12-10T16:34:24,818 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/recovered.edits/0000000000000000002 2024-12-10T16:34:24,818 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 0a88f42ff3cd74daeeadd0f738983cb1 2024-12-10T16:34:24,818 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 0a88f42ff3cd74daeeadd0f738983cb1 2024-12-10T16:34:24,822 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 0a88f42ff3cd74daeeadd0f738983cb1 2024-12-10T16:34:24,826 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/0a88f42ff3cd74daeeadd0f738983cb1/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1 2024-12-10T16:34:24,828 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 0a88f42ff3cd74daeeadd0f738983cb1; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71956083, jitterRate=0.07222919166088104}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T16:34:24,829 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 0a88f42ff3cd74daeeadd0f738983cb1: Writing region info on filesystem at 1733848464672Initializing all the Stores at 1733848464674 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848464674Obtaining lock to block concurrent updates at 1733848464698 (+24 ms)Preparing flush snapshotting stores in 0a88f42ff3cd74daeeadd0f738983cb1 at 1733848464698Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1733848464417.0a88f42ff3cd74daeeadd0f738983cb1., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1733848464700 (+2 ms)Flushing stores of testReplayEditsWrittenIntoWAL,,1733848464417.0a88f42ff3cd74daeeadd0f738983cb1. 
at 1733848464701 (+1 ms)Flushing 0a88f42ff3cd74daeeadd0f738983cb1/a: creating writer at 1733848464702 (+1 ms)Flushing 0a88f42ff3cd74daeeadd0f738983cb1/a: appending metadata at 1733848464735 (+33 ms)Flushing 0a88f42ff3cd74daeeadd0f738983cb1/a: closing flushed file at 1733848464737 (+2 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e7f7041: reopening flushed file at 1733848464799 (+62 ms)Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 0a88f42ff3cd74daeeadd0f738983cb1 in 117ms, sequenceid=2, compaction requested=false; wal=null at 1733848464816 (+17 ms)Cleaning up temporary data from old regions at 1733848464819 (+3 ms)Region opened successfully at 1733848464829 (+10 ms) 2024-12-10T16:34:24,856 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit0 Thread=377 (was 365) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:39580 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1758511473-172.17.0.3-1733848457790:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:39400 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1758511473-172.17.0.3-1733848457790:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: TestAsyncWALReplay-pool-0 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:59870 [Waiting for operation #10] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:59894 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:37598 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1758511473-172.17.0.3-1733848457790:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:35477/hbase-prefix:default java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:37744 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=703 (was 607) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=190 (was 190), ProcessCount=11 (was 11), AvailableMemoryMB=5277 (was 5288) 2024-12-10T16:34:24,867 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit1 Thread=377, OpenFileDescriptor=703, MaxFileDescriptor=1048576, SystemLoadAverage=190, ProcessCount=11, AvailableMemoryMB=5275 2024-12-10T16:34:24,887 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:24,889 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:24,890 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T16:34:24,894 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-83767351, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/hregion-83767351, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:24,906 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-83767351/hregion-83767351.1733848464895, exclude list is [], retry=0 2024-12-10T16:34:24,910 DEBUG [AsyncFSWAL-10-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:24,911 DEBUG [AsyncFSWAL-10-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:24,911 DEBUG [AsyncFSWAL-10-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:24,915 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-83767351/hregion-83767351.1733848464895 2024-12-10T16:34:24,916 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:44417:44417)] 2024-12-10T16:34:24,916 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 14b8f88d768951e68904cc4e578b3a8e, NAME => 'testReplayEditsWrittenIntoWAL,,1733848464887.14b8f88d768951e68904cc4e578b3a8e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => 
'0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35477/hbase 2024-12-10T16:34:24,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741846_1022 (size=64) 2024-12-10T16:34:24,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741846_1022 (size=64) 2024-12-10T16:34:24,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741846_1022 (size=64) 2024-12-10T16:34:24,928 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733848464887.14b8f88d768951e68904cc4e578b3a8e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:24,929 INFO [StoreOpener-14b8f88d768951e68904cc4e578b3a8e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 14b8f88d768951e68904cc4e578b3a8e 2024-12-10T16:34:24,932 INFO [StoreOpener-14b8f88d768951e68904cc4e578b3a8e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 14b8f88d768951e68904cc4e578b3a8e columnFamilyName a 2024-12-10T16:34:24,932 DEBUG [StoreOpener-14b8f88d768951e68904cc4e578b3a8e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:24,933 INFO [StoreOpener-14b8f88d768951e68904cc4e578b3a8e-1 {}] regionserver.HStore(327): Store=14b8f88d768951e68904cc4e578b3a8e/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:24,933 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 14b8f88d768951e68904cc4e578b3a8e 2024-12-10T16:34:24,934 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e 2024-12-10T16:34:24,934 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e 2024-12-10T16:34:24,935 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 14b8f88d768951e68904cc4e578b3a8e 2024-12-10T16:34:24,935 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 
14b8f88d768951e68904cc4e578b3a8e 2024-12-10T16:34:24,939 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 14b8f88d768951e68904cc4e578b3a8e 2024-12-10T16:34:24,942 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T16:34:24,943 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 14b8f88d768951e68904cc4e578b3a8e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68201165, jitterRate=0.01627655327320099}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T16:34:24,943 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 14b8f88d768951e68904cc4e578b3a8e: Writing region info on filesystem at 1733848464928Initializing all the Stores at 1733848464929 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848464929Cleaning up temporary data from old regions at 1733848464935 (+6 ms)Region opened successfully at 1733848464943 (+8 ms) 2024-12-10T16:34:24,944 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 14b8f88d768951e68904cc4e578b3a8e, disabling compactions & flushes 2024-12-10T16:34:24,944 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733848464887.14b8f88d768951e68904cc4e578b3a8e. 2024-12-10T16:34:24,944 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733848464887.14b8f88d768951e68904cc4e578b3a8e. 2024-12-10T16:34:24,944 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733848464887.14b8f88d768951e68904cc4e578b3a8e. after waiting 0 ms 2024-12-10T16:34:24,944 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733848464887.14b8f88d768951e68904cc4e578b3a8e. 2024-12-10T16:34:24,944 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733848464887.14b8f88d768951e68904cc4e578b3a8e. 
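The WARN entries in this log (at 16:34:24,643 for wal-2.temp and again at 16:34:25,157 for the existing 0000000000000000002 file) show what happens when two split WALs produce recovered-edits output that maps to the same sequence-id based file name. The sketch below is illustrative only: it mirrors that conflict handling using the standard Hadoop FileSystem API, but the class and helper names, the length-based tie-break, and the paths in main are assumptions for this sketch, not HBase's actual implementation.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch of recovered-edits name-conflict handling, under the
// assumptions stated above. Not HBase source code.
public final class RecoveredEditsConflictSketch {

  // Format a sequence id padded like the file names in the log,
  // e.g. 0000000000000000002.
  static String toEditsFileName(long maxSeqId) {
    return String.format("%019d", maxSeqId);
  }

  // Rename tmpFile to its final sequence-id name; if a file with that name
  // already exists, keep whichever side is longer and delete the other,
  // matching the two WARN branches visible in the log.
  static void commitRecoveredEdits(FileSystem fs, Path recoveredEditsDir,
      Path tmpFile, long maxSeqId) throws IOException {
    Path target = new Path(recoveredEditsDir, toEditsFileName(maxSeqId));
    if (fs.exists(target)) {
      long tmpLen = fs.getFileStatus(tmpFile).getLen();
      long existingLen = fs.getFileStatus(target).getLen();
      if (tmpLen <= existingLen) {
        // "Found existing old edits file and we have less entries. Deleting <tmp>"
        fs.delete(tmpFile, false);
        return;
      }
      // "Found existing old edits file. ... Deleting <existing>"
      fs.delete(target, false);
    }
    if (!fs.rename(tmpFile, target)) {
      throw new IOException("Failed to rename " + tmpFile + " to " + target);
    }
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    // Hypothetical region directory, for illustration only.
    Path dir = new Path("/hbase/data/default/t/region/recovered.edits");
    commitRecoveredEdits(fs, dir,
        new Path(dir, "0000000000000000002-wal-2.temp"), 2L);
  }
}
```

In the log above, the first split (wal-2 producing 1 edit) loses to the existing 2-edit file and its temp file is deleted, while in the later test the 2-edit wal-1 output replaces the previously committed 1-edit file; the sketch reproduces only that observable outcome, not the real implementation's edit-count bookkeeping.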
2024-12-10T16:34:24,944 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 14b8f88d768951e68904cc4e578b3a8e: Waiting for close lock at 1733848464944Disabling compacts and flushes for region at 1733848464944Disabling writes for close at 1733848464944Writing region close event to WAL at 1733848464944Closed at 1733848464944 2024-12-10T16:34:24,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741845_1021 (size=95) 2024-12-10T16:34:24,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741845_1021 (size=95) 2024-12-10T16:34:24,948 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/hregion-83767351/hregion-83767351.1733848464895 not finished, retry = 0 2024-12-10T16:34:24,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741845_1021 (size=95) 2024-12-10T16:34:25,053 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T16:34:25,053 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-83767351:(num 1733848464895) 2024-12-10T16:34:25,054 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-10T16:34:25,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741847_1023 (size=320) 2024-12-10T16:34:25,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741847_1023 (size=320) 2024-12-10T16:34:25,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741847_1023 (size=320) 2024-12-10T16:34:25,070 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-12-10T16:34:25,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741848_1024 (size=253) 2024-12-10T16:34:25,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741848_1024 (size=253) 2024-12-10T16:34:25,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741848_1024 (size=253) 2024-12-10T16:34:25,097 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-2, size=253 (253bytes) 2024-12-10T16:34:25,097 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-2 2024-12-10T16:34:25,098 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-2 after 1ms 2024-12-10T16:34:25,101 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-2: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, 
valueCompressionType=GZ 2024-12-10T16:34:25,102 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-2 took 5ms 2024-12-10T16:34:25,104 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-2 so closing down 2024-12-10T16:34:25,104 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T16:34:25,106 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-12-10T16:34:25,107 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000002-wal-2.temp 2024-12-10T16:34:25,107 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T16:34:25,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741849_1025 (size=253) 2024-12-10T16:34:25,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741849_1025 (size=253) 2024-12-10T16:34:25,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741849_1025 (size=253) 2024-12-10T16:34:25,115 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-10T16:34:25,117 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000002 2024-12-10T16:34:25,117 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 15 ms; skipped=0; WAL=hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-2, size=253, length=253, corrupted=false, cancelled=false 2024-12-10T16:34:25,117 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-2, journal: Splitting hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-2, size=253 (253bytes) at 1733848465097Finishing writing output for hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-2 so closing down at 1733848465104 (+7 ms)Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000002-wal-2.temp at 1733848465107 (+3 ms)3 split writer threads finished at 1733848465107Closed recovered edits writer 
path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733848465115 (+8 ms)Rename recovered edits hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000002 at 1733848465117 (+2 ms)Processed 1 edits across 1 Regions in 15 ms; skipped=0; WAL=hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-2, size=253, length=253, corrupted=false, cancelled=false at 1733848465117 2024-12-10T16:34:25,133 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-1, size=320 (320bytes) 2024-12-10T16:34:25,133 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-1 2024-12-10T16:34:25,133 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-1 after 0ms 2024-12-10T16:34:25,136 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-1: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:25,137 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-1 took 5ms 2024-12-10T16:34:25,139 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-1 so closing down 2024-12-10T16:34:25,139 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T16:34:25,141 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-12-10T16:34:25,142 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000001-wal-1.temp 2024-12-10T16:34:25,143 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T16:34:25,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741850_1026 (size=320) 2024-12-10T16:34:25,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741850_1026 (size=320) 2024-12-10T16:34:25,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741850_1026 (size=320) 2024-12-10T16:34:25,150 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer 
path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-10T16:34:25,155 DEBUG [split-log-closeStream-pool-0 {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:25,157 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(175): Found existing old edits file. It could be the result of a previous failed split attempt or we have duplicated wal entries. Deleting hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000002, length=253 2024-12-10T16:34:25,160 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000002 2024-12-10T16:34:25,160 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 23 ms; skipped=0; WAL=hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-1, size=320, length=320, corrupted=false, cancelled=false 2024-12-10T16:34:25,160 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-1, journal: Splitting hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-1, size=320 (320bytes) at 1733848465133Finishing writing output for hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-1 so closing down at 1733848465139 (+6 ms)Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000001-wal-1.temp at 1733848465142 (+3 ms)3 split writer threads finished at 1733848465143 (+1 ms)Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1733848465150 (+7 ms)Rename recovered edits hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000002 at 1733848465160 (+10 ms)Processed 2 edits across 1 Regions in 23 ms; skipped=0; WAL=hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal-1, size=320, length=320, corrupted=false, cancelled=false at 1733848465160 2024-12-10T16:34:25,160 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T16:34:25,162 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, 
logDir=hdfs://localhost:35477/hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:25,174 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal.1733848465163, exclude list is [], retry=0 2024-12-10T16:34:25,178 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:25,178 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:25,179 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:25,181 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal.1733848465163 2024-12-10T16:34:25,182 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:36795:36795)] 2024-12-10T16:34:25,182 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 14b8f88d768951e68904cc4e578b3a8e, NAME => 'testReplayEditsWrittenIntoWAL,,1733848464887.14b8f88d768951e68904cc4e578b3a8e.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:25,182 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733848464887.14b8f88d768951e68904cc4e578b3a8e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:25,182 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 14b8f88d768951e68904cc4e578b3a8e 2024-12-10T16:34:25,182 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 14b8f88d768951e68904cc4e578b3a8e 2024-12-10T16:34:25,184 INFO [StoreOpener-14b8f88d768951e68904cc4e578b3a8e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 14b8f88d768951e68904cc4e578b3a8e 2024-12-10T16:34:25,186 INFO [StoreOpener-14b8f88d768951e68904cc4e578b3a8e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 14b8f88d768951e68904cc4e578b3a8e columnFamilyName a 2024-12-10T16:34:25,186 DEBUG [StoreOpener-14b8f88d768951e68904cc4e578b3a8e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:25,187 INFO [StoreOpener-14b8f88d768951e68904cc4e578b3a8e-1 {}] regionserver.HStore(327): Store=14b8f88d768951e68904cc4e578b3a8e/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:25,187 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 14b8f88d768951e68904cc4e578b3a8e 2024-12-10T16:34:25,188 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e 2024-12-10T16:34:25,190 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e 2024-12-10T16:34:25,191 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000002 2024-12-10T16:34:25,194 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:25,196 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000002 2024-12-10T16:34:25,196 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 14b8f88d768951e68904cc4e578b3a8e 1/1 column families, dataSize=108 B heapSize=512 B 2024-12-10T16:34:25,210 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/.tmp/a/cd13b71f3b4a4876a50cd4c584823b1e is 58, key is testReplayEditsWrittenIntoWAL/a:1/1733848465053/Put/seqid=0 2024-12-10T16:34:25,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741852_1028 (size=5170) 2024-12-10T16:34:25,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741852_1028 (size=5170) 2024-12-10T16:34:25,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741852_1028 (size=5170) 2024-12-10T16:34:25,219 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/.tmp/a/cd13b71f3b4a4876a50cd4c584823b1e 2024-12-10T16:34:25,226 DEBUG [Time-limited test {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/.tmp/a/cd13b71f3b4a4876a50cd4c584823b1e as hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/a/cd13b71f3b4a4876a50cd4c584823b1e 2024-12-10T16:34:25,235 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/a/cd13b71f3b4a4876a50cd4c584823b1e, entries=2, sequenceid=2, filesize=5.0 K 2024-12-10T16:34:25,235 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 14b8f88d768951e68904cc4e578b3a8e in 39ms, sequenceid=2, compaction requested=false; wal=null 2024-12-10T16:34:25,236 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/0000000000000000002 2024-12-10T16:34:25,237 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 14b8f88d768951e68904cc4e578b3a8e 2024-12-10T16:34:25,237 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 14b8f88d768951e68904cc4e578b3a8e 2024-12-10T16:34:25,240 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 14b8f88d768951e68904cc4e578b3a8e 2024-12-10T16:34:25,243 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/14b8f88d768951e68904cc4e578b3a8e/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1 2024-12-10T16:34:25,244 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 14b8f88d768951e68904cc4e578b3a8e; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73499804, jitterRate=0.09523242712020874}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T16:34:25,244 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 14b8f88d768951e68904cc4e578b3a8e: Writing region info on filesystem at 1733848465183Initializing all the Stores at 1733848465184 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848465184Obtaining lock to block concurrent updates at 1733848465196 (+12 ms)Preparing flush snapshotting stores in 14b8f88d768951e68904cc4e578b3a8e at 1733848465196Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1733848464887.14b8f88d768951e68904cc4e578b3a8e., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1733848465196Flushing stores of testReplayEditsWrittenIntoWAL,,1733848464887.14b8f88d768951e68904cc4e578b3a8e. 
at 1733848465196Flushing 14b8f88d768951e68904cc4e578b3a8e/a: creating writer at 1733848465196Flushing 14b8f88d768951e68904cc4e578b3a8e/a: appending metadata at 1733848465209 (+13 ms)Flushing 14b8f88d768951e68904cc4e578b3a8e/a: closing flushed file at 1733848465209Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@87c3787: reopening flushed file at 1733848465225 (+16 ms)Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 14b8f88d768951e68904cc4e578b3a8e in 39ms, sequenceid=2, compaction requested=false; wal=null at 1733848465235 (+10 ms)Cleaning up temporary data from old regions at 1733848465237 (+2 ms)Region opened successfully at 1733848465244 (+7 ms) 2024-12-10T16:34:25,259 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit1 Thread=387 (was 377) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:59958 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:59870 [Waiting for operation #12] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:39652 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1758511473-172.17.0.3-1733848457790:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:37598 [Waiting for operation #15] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:37824 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:39400 [Waiting for operation #10] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1758511473-172.17.0.3-1733848457790:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1758511473-172.17.0.3-1733848457790:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=785 (was 703) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=190 (was 190), ProcessCount=11 (was 11), AvailableMemoryMB=5270 (was 5275) 2024-12-10T16:34:25,270 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenIntoWAL Thread=387, OpenFileDescriptor=785, MaxFileDescriptor=1048576, SystemLoadAverage=190, ProcessCount=11, AvailableMemoryMB=5269 2024-12-10T16:34:25,288 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:25,290 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:25,291 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T16:34:25,295 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-51715510, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/hregion-51715510, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:25,307 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-51715510/hregion-51715510.1733848465296, exclude list is [], retry=0 2024-12-10T16:34:25,311 DEBUG [AsyncFSWAL-12-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:25,311 DEBUG [AsyncFSWAL-12-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:25,312 DEBUG [AsyncFSWAL-12-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:25,314 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-51715510/hregion-51715510.1733848465296 2024-12-10T16:34:25,315 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:36795:36795)] 2024-12-10T16:34:25,315 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 8d0ffdc7859974c9f5eeb80d8b80a958, NAME => 'testReplayEditsWrittenIntoWAL,,1733848465289.8d0ffdc7859974c9f5eeb80d8b80a958.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35477/hbase 2024-12-10T16:34:25,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741854_1030 (size=64) 2024-12-10T16:34:25,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741854_1030 (size=64) 2024-12-10T16:34:25,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741854_1030 (size=64) 2024-12-10T16:34:25,327 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733848465289.8d0ffdc7859974c9f5eeb80d8b80a958.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:25,329 INFO [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 8d0ffdc7859974c9f5eeb80d8b80a958 2024-12-10T16:34:25,330 INFO [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8d0ffdc7859974c9f5eeb80d8b80a958 columnFamilyName a 2024-12-10T16:34:25,330 DEBUG [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:25,331 INFO [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] regionserver.HStore(327): Store=8d0ffdc7859974c9f5eeb80d8b80a958/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:25,331 INFO [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 8d0ffdc7859974c9f5eeb80d8b80a958 2024-12-10T16:34:25,332 INFO [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 
604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8d0ffdc7859974c9f5eeb80d8b80a958 columnFamilyName b 2024-12-10T16:34:25,332 DEBUG [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:25,333 INFO [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] regionserver.HStore(327): Store=8d0ffdc7859974c9f5eeb80d8b80a958/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:25,333 INFO [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 8d0ffdc7859974c9f5eeb80d8b80a958 2024-12-10T16:34:25,334 INFO [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8d0ffdc7859974c9f5eeb80d8b80a958 columnFamilyName c 2024-12-10T16:34:25,335 DEBUG [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:25,335 INFO [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] regionserver.HStore(327): Store=8d0ffdc7859974c9f5eeb80d8b80a958/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:25,335 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 8d0ffdc7859974c9f5eeb80d8b80a958 2024-12-10T16:34:25,336 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958 2024-12-10T16:34:25,337 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958 2024-12-10T16:34:25,338 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 8d0ffdc7859974c9f5eeb80d8b80a958 2024-12-10T16:34:25,338 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 8d0ffdc7859974c9f5eeb80d8b80a958 
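For orientation, the large raw constants in the CompactionConfiguration lines above decode to familiar units. The following is a standalone arithmetic sketch, not HBase code; it only restates the logged values (throttle point, major period, tiered max_age) in readable form.

```java
// Decodes the raw CompactionConfiguration constants logged above into
// human-readable units. Plain arithmetic only; no HBase classes involved.
public class CompactionConfigUnits {
    public static void main(String[] args) {
        long throttlePoint = 2684354560L;            // bytes, as logged
        long majorPeriodMs = 604800000L;             // milliseconds, as logged
        long tieredMaxAge  = 9223372036854775807L;   // as logged; equals Long.MAX_VALUE

        System.out.println("throttle point = " + throttlePoint / (1024.0 * 1024 * 1024) + " GiB"); // 2.5 GiB
        System.out.println("major period   = " + majorPeriodMs / (24.0 * 3600 * 1000) + " days");  // 7.0 days
        System.out.println("max_age == Long.MAX_VALUE: " + (tieredMaxAge == Long.MAX_VALUE));      // true, i.e. unbounded
    }
}
```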
2024-12-10T16:34:25,339 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T16:34:25,340 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 8d0ffdc7859974c9f5eeb80d8b80a958 2024-12-10T16:34:25,343 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T16:34:25,344 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 8d0ffdc7859974c9f5eeb80d8b80a958; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67925147, jitterRate=0.012163564562797546}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T16:34:25,344 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 8d0ffdc7859974c9f5eeb80d8b80a958: Writing region info on filesystem at 1733848465327Initializing all the Stores at 1733848465328 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848465328Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848465328Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848465328Cleaning up temporary data from old regions at 1733848465338 (+10 ms)Region opened successfully at 1733848465344 (+6 ms) 2024-12-10T16:34:25,344 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 8d0ffdc7859974c9f5eeb80d8b80a958, disabling compactions & flushes 2024-12-10T16:34:25,345 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733848465289.8d0ffdc7859974c9f5eeb80d8b80a958. 2024-12-10T16:34:25,345 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733848465289.8d0ffdc7859974c9f5eeb80d8b80a958. 2024-12-10T16:34:25,345 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733848465289.8d0ffdc7859974c9f5eeb80d8b80a958. after waiting 0 ms 2024-12-10T16:34:25,345 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733848465289.8d0ffdc7859974c9f5eeb80d8b80a958. 
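The FlushLargeStoresPolicy line above falls back to "memstore flush heap size divided by number of families" because the table descriptor does not set hbase.hregion.percolumnfamilyflush.size.lower.bound. A minimal sketch of that arithmetic follows, assuming the default 128 MB memstore flush size; the result matches the flushSizeLowerBound=44739242 reported when the region opens.

```java
// Reproduces the per-column-family flush lower bound the log reports when
// hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the table:
// memstore flush size divided by the number of column families.
public class FlushLowerBound {
    public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024; // 134217728 bytes (assumed default flush size)
        int columnFamilies = 3;                      // families a, b and c in this test table

        long lowerBound = memstoreFlushSize / columnFamilies;
        System.out.println(lowerBound);              // 44739242 bytes, i.e. the ~42.7 M in the log
    }
}
```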
2024-12-10T16:34:25,345 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733848465289.8d0ffdc7859974c9f5eeb80d8b80a958. 2024-12-10T16:34:25,345 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 8d0ffdc7859974c9f5eeb80d8b80a958: Waiting for close lock at 1733848465344Disabling compacts and flushes for region at 1733848465344Disabling writes for close at 1733848465345 (+1 ms)Writing region close event to WAL at 1733848465345Closed at 1733848465345 2024-12-10T16:34:25,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741853_1029 (size=95) 2024-12-10T16:34:25,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741853_1029 (size=95) 2024-12-10T16:34:25,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741853_1029 (size=95) 2024-12-10T16:34:25,352 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T16:34:25,352 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-51715510:(num 1733848465296) 2024-12-10T16:34:25,353 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T16:34:25,356 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:25,369 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848465356, exclude list is [], retry=0 2024-12-10T16:34:25,373 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:25,373 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:25,373 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:25,376 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848465356 2024-12-10T16:34:25,376 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:42633:42633)] 2024-12-10T16:34:25,547 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848465356, size=0 (0bytes) 2024-12-10T16:34:25,547 WARN [Time-limited test {}] wal.WALSplitter(453): File 
hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848465356 might be still open, length is 0 2024-12-10T16:34:25,547 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848465356 2024-12-10T16:34:25,548 WARN [IPC Server handler 3 on default port 35477 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848465356 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741855_1031 2024-12-10T16:34:25,548 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848465356 after 1ms 2024-12-10T16:34:26,328 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:39684 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:43913:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39684 dst: /127.0.0.1:43913 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:43913 remote=/127.0.0.1:39684]. Total timeout mills is 60000, 59190 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T16:34:26,329 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:60004 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:42039:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60004 dst: /127.0.0.1:42039 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T16:34:26,329 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:37852 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:46873:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37852 dst: /127.0.0.1:46873 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T16:34:26,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741855_1032 (size=263633) 2024-12-10T16:34:26,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741855_1032 (size=263633) 2024-12-10T16:34:26,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741855_1032 (size=263633) 2024-12-10T16:34:29,550 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848465356 after 4003ms 2024-12-10T16:34:29,559 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848465356: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:29,560 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848465356 took 4014ms 2024-12-10T16:34:29,565 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1733848465356.temp 2024-12-10T16:34:29,567 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/recovered.edits/0000000000000000001-wal.1733848465356.temp 2024-12-10T16:34:29,576 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T16:34:29,651 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-10T16:34:29,680 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848465356; continuing. 
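The split above could not read wal.1733848465356 until its HDFS lease was recovered: attempt=0 fails immediately, and a later attempt succeeds roughly four seconds later once the NameNode finishes block recovery. Below is a minimal sketch of driving that loop with the public DistributedFileSystem.recoverLease API; the path is taken from the log, while the one-second retry interval is an assumption, not the actual RecoverLeaseFSUtils schedule.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch of recovering the HDFS lease on a WAL that may still be open.
// recoverLease() returns true once the file is closed and safe to read;
// the 1-second retry interval below is an assumption for illustration.
public class RecoverWalLease {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path wal = new Path("hdfs://localhost:35477/hbase/WALs/"
            + "testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848465356");

        DistributedFileSystem dfs = (DistributedFileSystem) wal.getFileSystem(conf);
        int attempt = 0;
        while (!dfs.recoverLease(wal)) {
            System.out.println("Failed to recover lease, attempt=" + attempt++);
            Thread.sleep(1000L);
        }
        System.out.println("Recovered lease after " + attempt + " retries");
    }
}
```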
2024-12-10T16:34:29,680 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848465356 so closing down 2024-12-10T16:34:29,681 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T16:34:29,681 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T16:34:29,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741856_1033 (size=263641) 2024-12-10T16:34:29,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741856_1033 (size=263641) 2024-12-10T16:34:29,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741856_1033 (size=263641) 2024-12-10T16:34:29,685 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/recovered.edits/0000000000000000001-wal.1733848465356.temp (wrote 3002 edits, skipped 0 edits in 71 ms) 2024-12-10T16:34:29,687 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/recovered.edits/0000000000000000001-wal.1733848465356.temp to hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/recovered.edits/0000000000000003002 2024-12-10T16:34:29,687 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3002 edits across 1 Regions in 126 ms; skipped=0; WAL=hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848465356, size=0, length=0, corrupted=false, cancelled=false 2024-12-10T16:34:29,687 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848465356, journal: Splitting hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848465356, size=0 (0bytes) at 1733848465547Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/recovered.edits/0000000000000000001-wal.1733848465356.temp at 1733848469567 (+4020 ms)Split 1024 edits, skipped 0 edits. at 1733848469627 (+60 ms)Split 2048 edits, skipped 0 edits. 
at 1733848469656 (+29 ms)Finishing writing output for hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848465356 so closing down at 1733848469681 (+25 ms)3 split writer threads finished at 1733848469681Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/recovered.edits/0000000000000000001-wal.1733848465356.temp (wrote 3002 edits, skipped 0 edits in 71 ms) at 1733848469685 (+4 ms)Rename recovered edits hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/recovered.edits/0000000000000000001-wal.1733848465356.temp to hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/recovered.edits/0000000000000003002 at 1733848469687 (+2 ms)Processed 3002 edits across 1 Regions in 126 ms; skipped=0; WAL=hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848465356, size=0, length=0, corrupted=false, cancelled=false at 1733848469687 2024-12-10T16:34:29,690 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848465356 to hdfs://localhost:35477/hbase/oldWALs/wal.1733848465356 2024-12-10T16:34:29,691 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/recovered.edits/0000000000000003002 2024-12-10T16:34:29,692 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T16:34:29,694 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:29,707 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848469695, exclude list is [], retry=0 2024-12-10T16:34:29,711 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:29,712 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:29,712 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:29,716 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848469695 2024-12-10T16:34:29,717 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:36795:36795)] 
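The split journal above ends with the temp output file (named after the first sequence id and the source WAL) being renamed to a recovered.edits file named after the highest sequence id it holds, zero-padded: 0000000000000000001-wal.1733848465356.temp becomes 0000000000000003002. A small sketch of that rename step follows; the paths come from the log, and the 19-digit padding width is inferred from the file names rather than from HBase source.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the final rename the split journal describes: temp file named after the
// first sequence id and source WAL -> recovered.edits file named after the max sequence id.
public class RenameRecoveredEdits {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path editsDir = new Path("hdfs://localhost:35477/hbase/data/default/"
            + "testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/recovered.edits");

        long firstSeqId = 1L, maxSeqId = 3002L;
        Path temp      = new Path(editsDir, String.format("%019d-wal.1733848465356.temp", firstSeqId));
        Path finalFile = new Path(editsDir, String.format("%019d", maxSeqId)); // 0000000000000003002

        FileSystem fs = editsDir.getFileSystem(conf);
        if (!fs.rename(temp, finalFile)) {
            throw new java.io.IOException("Rename failed: " + temp + " -> " + finalFile);
        }
    }
}
```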
2024-12-10T16:34:29,717 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1733848465289.8d0ffdc7859974c9f5eeb80d8b80a958.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:29,719 INFO [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 8d0ffdc7859974c9f5eeb80d8b80a958 2024-12-10T16:34:29,721 INFO [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8d0ffdc7859974c9f5eeb80d8b80a958 columnFamilyName a 2024-12-10T16:34:29,721 DEBUG [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:29,722 INFO [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] regionserver.HStore(327): Store=8d0ffdc7859974c9f5eeb80d8b80a958/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:29,722 INFO [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 8d0ffdc7859974c9f5eeb80d8b80a958 2024-12-10T16:34:29,723 INFO [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8d0ffdc7859974c9f5eeb80d8b80a958 columnFamilyName b 2024-12-10T16:34:29,724 DEBUG [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:29,724 INFO [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] regionserver.HStore(327): Store=8d0ffdc7859974c9f5eeb80d8b80a958/b, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:29,725 INFO [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 8d0ffdc7859974c9f5eeb80d8b80a958 2024-12-10T16:34:29,726 INFO [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8d0ffdc7859974c9f5eeb80d8b80a958 columnFamilyName c 2024-12-10T16:34:29,726 DEBUG [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:29,727 INFO [StoreOpener-8d0ffdc7859974c9f5eeb80d8b80a958-1 {}] regionserver.HStore(327): Store=8d0ffdc7859974c9f5eeb80d8b80a958/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:29,727 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 8d0ffdc7859974c9f5eeb80d8b80a958 2024-12-10T16:34:29,728 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958 2024-12-10T16:34:29,730 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958 2024-12-10T16:34:29,731 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/recovered.edits/0000000000000003002 2024-12-10T16:34:29,734 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/recovered.edits/0000000000000003002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:29,769 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-10T16:34:30,109 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 8d0ffdc7859974c9f5eeb80d8b80a958 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-10T16:34:30,148 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/.tmp/a/ceff880a91ce4a99b8a8e741a9b4e37c 
is 62, key is testReplayEditsWrittenIntoWAL/a:100/1733848465382/Put/seqid=0 2024-12-10T16:34:30,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741858_1035 (size=50463) 2024-12-10T16:34:30,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741858_1035 (size=50463) 2024-12-10T16:34:30,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741858_1035 (size=50463) 2024-12-10T16:34:30,160 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=754 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/.tmp/a/ceff880a91ce4a99b8a8e741a9b4e37c 2024-12-10T16:34:30,169 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/.tmp/a/ceff880a91ce4a99b8a8e741a9b4e37c as hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/a/ceff880a91ce4a99b8a8e741a9b4e37c 2024-12-10T16:34:30,177 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/a/ceff880a91ce4a99b8a8e741a9b4e37c, entries=754, sequenceid=754, filesize=49.3 K 2024-12-10T16:34:30,177 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.59 KB/101984, currentSize=0 B/0 for 8d0ffdc7859974c9f5eeb80d8b80a958 in 69ms, sequenceid=754, compaction requested=false; wal=null 2024-12-10T16:34:30,201 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-10T16:34:30,202 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 8d0ffdc7859974c9f5eeb80d8b80a958 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-10T16:34:30,209 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/.tmp/a/1d23c090d2d140ad90b738aac8daf4f1 is 62, key is testReplayEditsWrittenIntoWAL/a:754/1733848465411/Put/seqid=0 2024-12-10T16:34:30,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741859_1036 (size=20072) 2024-12-10T16:34:30,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741859_1036 (size=20072) 2024-12-10T16:34:30,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741859_1036 (size=20072) 2024-12-10T16:34:30,220 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.93 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/.tmp/a/1d23c090d2d140ad90b738aac8daf4f1 2024-12-10T16:34:30,248 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/.tmp/b/e556b4a951184e349a1e48dd6a94a735 is 62, key is testReplayEditsWrittenIntoWAL/b:100/1733848465439/Put/seqid=0 2024-12-10T16:34:30,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741860_1037 (size=35835) 2024-12-10T16:34:30,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741860_1037 (size=35835) 2024-12-10T16:34:30,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741860_1037 (size=35835) 2024-12-10T16:34:30,258 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=28.56 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/.tmp/b/e556b4a951184e349a1e48dd6a94a735 2024-12-10T16:34:30,266 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/.tmp/a/1d23c090d2d140ad90b738aac8daf4f1 as hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/a/1d23c090d2d140ad90b738aac8daf4f1 2024-12-10T16:34:30,274 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/a/1d23c090d2d140ad90b738aac8daf4f1, entries=246, sequenceid=1508, filesize=19.6 K 2024-12-10T16:34:30,276 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/.tmp/b/e556b4a951184e349a1e48dd6a94a735 as hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/b/e556b4a951184e349a1e48dd6a94a735 2024-12-10T16:34:30,283 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/b/e556b4a951184e349a1e48dd6a94a735, entries=508, sequenceid=1508, filesize=35.0 K 2024-12-10T16:34:30,284 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for 8d0ffdc7859974c9f5eeb80d8b80a958 in 83ms, sequenceid=1508, compaction requested=false; wal=null 2024-12-10T16:34:30,301 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-10T16:34:30,302 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 8d0ffdc7859974c9f5eeb80d8b80a958 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-12-10T16:34:30,308 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/.tmp/b/3baa51fb16174744957c5d2f099d9e42 is 62, key is testReplayEditsWrittenIntoWAL/b:508/1733848465455/Put/seqid=0 2024-12-10T16:34:30,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741861_1038 (size=35082) 2024-12-10T16:34:30,318 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741861_1038 (size=35082) 2024-12-10T16:34:30,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741861_1038 (size=35082) 2024-12-10T16:34:30,319 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=27.87 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/.tmp/b/3baa51fb16174744957c5d2f099d9e42 2024-12-10T16:34:30,342 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/.tmp/c/d28973f73b3d4fc7a7acb40a83efdc23 is 62, key is testReplayEditsWrittenIntoWAL/c:100/1733848465481/Put/seqid=0 2024-12-10T16:34:30,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741862_1039 (size=20825) 2024-12-10T16:34:30,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741862_1039 (size=20825) 2024-12-10T16:34:30,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741862_1039 (size=20825) 2024-12-10T16:34:30,350 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.63 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/.tmp/c/d28973f73b3d4fc7a7acb40a83efdc23 2024-12-10T16:34:30,359 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/.tmp/b/3baa51fb16174744957c5d2f099d9e42 as hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/b/3baa51fb16174744957c5d2f099d9e42 2024-12-10T16:34:30,367 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/b/3baa51fb16174744957c5d2f099d9e42, entries=492, sequenceid=2262, filesize=34.3 K 2024-12-10T16:34:30,369 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/.tmp/c/d28973f73b3d4fc7a7acb40a83efdc23 as hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/c/d28973f73b3d4fc7a7acb40a83efdc23 2024-12-10T16:34:30,376 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/c/d28973f73b3d4fc7a7acb40a83efdc23, entries=262, sequenceid=2262, filesize=20.3 K 2024-12-10T16:34:30,376 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for 8d0ffdc7859974c9f5eeb80d8b80a958 in 74ms, sequenceid=2262, compaction requested=false; wal=null 2024-12-10T16:34:30,387 WARN [Time-limited test {}] regionserver.HRegion(5722): No family for cell testReplayEditsWrittenIntoWAL/another 
family:testReplayEditsWrittenIntoWAL/1733848465515/Put/vlen=29/seqid=0 in region testReplayEditsWrittenIntoWAL,,1733848465289.8d0ffdc7859974c9f5eeb80d8b80a958. 2024-12-10T16:34:30,390 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3001, skipped 1, firstSequenceIdInLog=1, maxSequenceIdInLog=3002, path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/recovered.edits/0000000000000003002 2024-12-10T16:34:30,390 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-12-10T16:34:30,391 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 8d0ffdc7859974c9f5eeb80d8b80a958 3/3 column families, dataSize=41.85 KB heapSize=98.89 KB 2024-12-10T16:34:30,401 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/.tmp/c/ec21659f1f92468eb9ebf10e2773f565 is 62, key is testReplayEditsWrittenIntoWAL/c:262/1733848465487/Put/seqid=0 2024-12-10T16:34:30,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741863_1040 (size=50301) 2024-12-10T16:34:30,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741863_1040 (size=50301) 2024-12-10T16:34:30,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741863_1040 (size=50301) 2024-12-10T16:34:30,411 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=41.85 KB at sequenceid=3002 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/.tmp/c/ec21659f1f92468eb9ebf10e2773f565 2024-12-10T16:34:30,417 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ec21659f1f92468eb9ebf10e2773f565 2024-12-10T16:34:30,419 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/.tmp/c/ec21659f1f92468eb9ebf10e2773f565 as hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/c/ec21659f1f92468eb9ebf10e2773f565 2024-12-10T16:34:30,425 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ec21659f1f92468eb9ebf10e2773f565 2024-12-10T16:34:30,425 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/c/ec21659f1f92468eb9ebf10e2773f565, entries=739, sequenceid=3002, filesize=49.1 K 2024-12-10T16:34:30,426 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~41.85 KB/42854, heapSize ~98.38 KB/100736, currentSize=0 B/0 for 8d0ffdc7859974c9f5eeb80d8b80a958 in 35ms, sequenceid=3002, compaction requested=false; wal=null 2024-12-10T16:34:30,427 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/recovered.edits/0000000000000003002 2024-12-10T16:34:30,428 DEBUG [Time-limited test {}] 
regionserver.HRegion(1048): stopping wal replay for 8d0ffdc7859974c9f5eeb80d8b80a958 2024-12-10T16:34:30,428 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 8d0ffdc7859974c9f5eeb80d8b80a958 2024-12-10T16:34:30,429 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T16:34:30,431 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 8d0ffdc7859974c9f5eeb80d8b80a958 2024-12-10T16:34:30,433 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenIntoWAL/8d0ffdc7859974c9f5eeb80d8b80a958/recovered.edits/3002.seqid, newMaxSeqId=3002, maxSeqId=1 2024-12-10T16:34:30,434 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 8d0ffdc7859974c9f5eeb80d8b80a958; next sequenceid=3003; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=204800, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68148709, jitterRate=0.015494897961616516}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T16:34:30,434 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 8d0ffdc7859974c9f5eeb80d8b80a958: Writing region info on filesystem at 1733848469717Initializing all the Stores at 1733848469719 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848469719Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848469719Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848469719Cleaning up temporary data from old regions at 1733848470428 (+709 ms)Region opened successfully at 1733848470434 (+6 ms) 2024-12-10T16:34:30,495 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 8d0ffdc7859974c9f5eeb80d8b80a958, disabling compactions & flushes 2024-12-10T16:34:30,495 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1733848465289.8d0ffdc7859974c9f5eeb80d8b80a958. 2024-12-10T16:34:30,495 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1733848465289.8d0ffdc7859974c9f5eeb80d8b80a958. 2024-12-10T16:34:30,495 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1733848465289.8d0ffdc7859974c9f5eeb80d8b80a958. 
after waiting 0 ms 2024-12-10T16:34:30,496 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1733848465289.8d0ffdc7859974c9f5eeb80d8b80a958. 2024-12-10T16:34:30,498 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1733848465289.8d0ffdc7859974c9f5eeb80d8b80a958. 2024-12-10T16:34:30,498 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 8d0ffdc7859974c9f5eeb80d8b80a958: Waiting for close lock at 1733848470495Disabling compacts and flushes for region at 1733848470495Disabling writes for close at 1733848470496 (+1 ms)Writing region close event to WAL at 1733848470498 (+2 ms)Closed at 1733848470498 2024-12-10T16:34:30,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741857_1034 (size=95) 2024-12-10T16:34:30,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741857_1034 (size=95) 2024-12-10T16:34:30,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741857_1034 (size=95) 2024-12-10T16:34:30,506 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T16:34:30,506 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733848469695) 2024-12-10T16:34:30,521 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenIntoWAL Thread=403 (was 387) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_972529285_22 at /127.0.0.1:59744 [Waiting for operation #18] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_972529285_22 at /127.0.0.1:60050 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.replay.wal.secondtime@localhost:35477 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:45691 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_972529285_22 at /127.0.0.1:54802 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:45921 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:35477 from jenkins.replay.wal.secondtime java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: AsyncFSWAL-12-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45691 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45921 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_972529285_22 at /127.0.0.1:39704 [Waiting for operation #19] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=865 (was 785) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=183 (was 190), ProcessCount=11 (was 11), AvailableMemoryMB=5199 (was 5269) 2024-12-10T16:34:30,532 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#test2727 Thread=403, OpenFileDescriptor=865, MaxFileDescriptor=1048576, SystemLoadAverage=183, ProcessCount=11, AvailableMemoryMB=5198 2024-12-10T16:34:30,546 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:30,549 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:30,549 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T16:34:30,552 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-09212395, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/hregion-09212395, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:30,564 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-09212395/hregion-09212395.1733848470552, exclude list is [], retry=0 2024-12-10T16:34:30,567 DEBUG [AsyncFSWAL-14-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:30,567 DEBUG [AsyncFSWAL-14-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:30,567 DEBUG [AsyncFSWAL-14-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:30,570 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-09212395/hregion-09212395.1733848470552 2024-12-10T16:34:30,570 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:36795:36795)] 2024-12-10T16:34:30,570 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => ef5d5a618740d31b7e380adbd000f6da, NAME => 'test2727,,1733848470547.ef5d5a618740d31b7e380adbd000f6da.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test2727', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35477/hbase 2024-12-10T16:34:30,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741865_1042 (size=43) 2024-12-10T16:34:30,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741865_1042 (size=43) 2024-12-10T16:34:30,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741865_1042 (size=43) 2024-12-10T16:34:30,589 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1733848470547.ef5d5a618740d31b7e380adbd000f6da.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:30,590 INFO [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region ef5d5a618740d31b7e380adbd000f6da 2024-12-10T16:34:30,592 INFO [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ef5d5a618740d31b7e380adbd000f6da columnFamilyName a 2024-12-10T16:34:30,592 DEBUG [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:30,593 INFO [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] regionserver.HStore(327): Store=ef5d5a618740d31b7e380adbd000f6da/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:30,593 INFO [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region ef5d5a618740d31b7e380adbd000f6da 2024-12-10T16:34:30,595 INFO [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ef5d5a618740d31b7e380adbd000f6da columnFamilyName b 2024-12-10T16:34:30,595 DEBUG [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:30,596 INFO [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] regionserver.HStore(327): Store=ef5d5a618740d31b7e380adbd000f6da/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:30,596 INFO [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region ef5d5a618740d31b7e380adbd000f6da 2024-12-10T16:34:30,598 INFO [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ef5d5a618740d31b7e380adbd000f6da columnFamilyName c 2024-12-10T16:34:30,598 DEBUG [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:30,598 INFO [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] regionserver.HStore(327): Store=ef5d5a618740d31b7e380adbd000f6da/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:30,598 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for ef5d5a618740d31b7e380adbd000f6da 2024-12-10T16:34:30,599 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da 2024-12-10T16:34:30,599 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da 2024-12-10T16:34:30,601 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for ef5d5a618740d31b7e380adbd000f6da 2024-12-10T16:34:30,601 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for ef5d5a618740d31b7e380adbd000f6da 2024-12-10T16:34:30,601 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in 
table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T16:34:30,602 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for ef5d5a618740d31b7e380adbd000f6da 2024-12-10T16:34:30,605 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T16:34:30,606 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened ef5d5a618740d31b7e380adbd000f6da; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66132652, jitterRate=-0.014546692371368408}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T16:34:30,607 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for ef5d5a618740d31b7e380adbd000f6da: Writing region info on filesystem at 1733848470589Initializing all the Stores at 1733848470590 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848470590Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848470590Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848470590Cleaning up temporary data from old regions at 1733848470601 (+11 ms)Region opened successfully at 1733848470607 (+6 ms) 2024-12-10T16:34:30,607 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing ef5d5a618740d31b7e380adbd000f6da, disabling compactions & flushes 2024-12-10T16:34:30,607 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1733848470547.ef5d5a618740d31b7e380adbd000f6da. 2024-12-10T16:34:30,607 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1733848470547.ef5d5a618740d31b7e380adbd000f6da. 2024-12-10T16:34:30,607 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1733848470547.ef5d5a618740d31b7e380adbd000f6da. after waiting 0 ms 2024-12-10T16:34:30,607 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1733848470547.ef5d5a618740d31b7e380adbd000f6da. 2024-12-10T16:34:30,608 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1733848470547.ef5d5a618740d31b7e380adbd000f6da. 
2024-12-10T16:34:30,608 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for ef5d5a618740d31b7e380adbd000f6da: Waiting for close lock at 1733848470607Disabling compacts and flushes for region at 1733848470607Disabling writes for close at 1733848470607Writing region close event to WAL at 1733848470608 (+1 ms)Closed at 1733848470608 2024-12-10T16:34:30,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741864_1041 (size=95) 2024-12-10T16:34:30,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741864_1041 (size=95) 2024-12-10T16:34:30,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741864_1041 (size=95) 2024-12-10T16:34:30,613 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T16:34:30,613 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-09212395:(num 1733848470552) 2024-12-10T16:34:30,613 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T16:34:30,615 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:30,627 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470616, exclude list is [], retry=0 2024-12-10T16:34:30,630 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:30,630 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:30,631 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:30,632 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470616 2024-12-10T16:34:30,633 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:42633:42633)] 2024-12-10T16:34:30,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741866_1043 (size=263359) 2024-12-10T16:34:30,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741866_1043 (size=263359) 2024-12-10T16:34:30,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741866_1043 
(size=263359) 2024-12-10T16:34:30,801 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470616, size=257.2 K (263359bytes) 2024-12-10T16:34:30,801 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470616 2024-12-10T16:34:30,802 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470616 after 1ms 2024-12-10T16:34:30,805 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470616: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:30,807 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470616 took 6ms 2024-12-10T16:34:30,811 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1733848470616.temp 2024-12-10T16:34:30,813 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000000001-wal.1733848470616.temp 2024-12-10T16:34:30,869 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470616 so closing down 2024-12-10T16:34:30,869 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T16:34:30,869 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T16:34:30,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741867_1044 (size=263359) 2024-12-10T16:34:30,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741867_1044 (size=263359) 2024-12-10T16:34:30,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741867_1044 (size=263359) 2024-12-10T16:34:30,873 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000000001-wal.1733848470616.temp (wrote 3000 edits, skipped 0 edits in 38 ms) 2024-12-10T16:34:30,875 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000000001-wal.1733848470616.temp to hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000003000 2024-12-10T16:34:30,875 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 68 ms; skipped=0; WAL=hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470616, size=257.2 K, length=263359, corrupted=false, cancelled=false 
2024-12-10T16:34:30,875 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470616, journal: Splitting hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470616, size=257.2 K (263359bytes) at 1733848470801Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000000001-wal.1733848470616.temp at 1733848470813 (+12 ms)Split 1024 edits, skipped 0 edits. at 1733848470829 (+16 ms)Split 2048 edits, skipped 0 edits. at 1733848470849 (+20 ms)Finishing writing output for hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470616 so closing down at 1733848470869 (+20 ms)3 split writer threads finished at 1733848470869Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000000001-wal.1733848470616.temp (wrote 3000 edits, skipped 0 edits in 38 ms) at 1733848470873 (+4 ms)Rename recovered edits hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000000001-wal.1733848470616.temp to hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000003000 at 1733848470875 (+2 ms)Processed 3000 edits across 1 Regions in 68 ms; skipped=0; WAL=hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470616, size=257.2 K, length=263359, corrupted=false, cancelled=false at 1733848470875 2024-12-10T16:34:30,877 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470616 to hdfs://localhost:35477/hbase/oldWALs/wal.1733848470616 2024-12-10T16:34:30,878 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000003000 2024-12-10T16:34:30,878 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T16:34:30,881 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:30,900 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470882, exclude list is [], retry=0 2024-12-10T16:34:30,904 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:30,904 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:30,904 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:30,908 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470882 2024-12-10T16:34:30,908 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:36795:36795)] 2024-12-10T16:34:31,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741868_1045 (size=263486) 2024-12-10T16:34:31,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741868_1045 (size=263486) 2024-12-10T16:34:31,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741868_1045 (size=263486) 2024-12-10T16:34:31,061 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470882, size=257.3 K (263486bytes) 2024-12-10T16:34:31,061 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470882 2024-12-10T16:34:31,061 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470882 after 0ms 2024-12-10T16:34:31,065 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470882: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:31,067 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470882 took 7ms 2024-12-10T16:34:31,073 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000003001-wal.1733848470882.temp 2024-12-10T16:34:31,075 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000003001-wal.1733848470882.temp 2024-12-10T16:34:31,121 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470882 so closing down 2024-12-10T16:34:31,121 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T16:34:31,122 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T16:34:31,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741869_1046 (size=263486) 2024-12-10T16:34:31,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741869_1046 (size=263486) 2024-12-10T16:34:31,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741869_1046 (size=263486) 2024-12-10T16:34:31,126 INFO 
[split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000003001-wal.1733848470882.temp (wrote 3000 edits, skipped 0 edits in 35 ms) 2024-12-10T16:34:31,128 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000003001-wal.1733848470882.temp to hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000006000 2024-12-10T16:34:31,129 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 59 ms; skipped=0; WAL=hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470882, size=257.3 K, length=263486, corrupted=false, cancelled=false 2024-12-10T16:34:31,129 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470882, journal: Splitting hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470882, size=257.3 K (263486bytes) at 1733848471061Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000003001-wal.1733848470882.temp at 1733848471075 (+14 ms)Split 1024 edits, skipped 0 edits. at 1733848471089 (+14 ms)Split 2048 edits, skipped 0 edits. at 1733848471105 (+16 ms)Finishing writing output for hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470882 so closing down at 1733848471121 (+16 ms)3 split writer threads finished at 1733848471122 (+1 ms)Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000003001-wal.1733848470882.temp (wrote 3000 edits, skipped 0 edits in 35 ms) at 1733848471126 (+4 ms)Rename recovered edits hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000003001-wal.1733848470882.temp to hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000006000 at 1733848471128 (+2 ms)Processed 3000 edits across 1 Regions in 59 ms; skipped=0; WAL=hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470882, size=257.3 K, length=263486, corrupted=false, cancelled=false at 1733848471129 (+1 ms) 2024-12-10T16:34:31,131 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848470882 to hdfs://localhost:35477/hbase/oldWALs/wal.1733848470882 2024-12-10T16:34:31,133 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000006000 2024-12-10T16:34:31,133 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T16:34:31,136 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/test2727-manual,16010,1733848470546, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:31,156 DEBUG [Time-limited test {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848471136, exclude list is [], retry=0 2024-12-10T16:34:31,160 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:31,160 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:31,161 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:31,163 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1733848470546/wal.1733848471136 2024-12-10T16:34:31,164 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:36795:36795)] 2024-12-10T16:34:31,164 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => ef5d5a618740d31b7e380adbd000f6da, NAME => 'test2727,,1733848470547.ef5d5a618740d31b7e380adbd000f6da.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:31,164 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1733848470547.ef5d5a618740d31b7e380adbd000f6da.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:31,164 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for ef5d5a618740d31b7e380adbd000f6da 2024-12-10T16:34:31,164 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for ef5d5a618740d31b7e380adbd000f6da 2024-12-10T16:34:31,166 INFO [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region ef5d5a618740d31b7e380adbd000f6da 2024-12-10T16:34:31,168 INFO [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ef5d5a618740d31b7e380adbd000f6da columnFamilyName a 2024-12-10T16:34:31,168 DEBUG [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:31,169 INFO [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] regionserver.HStore(327): Store=ef5d5a618740d31b7e380adbd000f6da/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:31,169 INFO [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region ef5d5a618740d31b7e380adbd000f6da 2024-12-10T16:34:31,170 INFO [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ef5d5a618740d31b7e380adbd000f6da columnFamilyName b 2024-12-10T16:34:31,170 DEBUG [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:31,171 INFO [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] regionserver.HStore(327): Store=ef5d5a618740d31b7e380adbd000f6da/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:31,171 INFO [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region ef5d5a618740d31b7e380adbd000f6da 2024-12-10T16:34:31,172 INFO [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ef5d5a618740d31b7e380adbd000f6da columnFamilyName c 2024-12-10T16:34:31,172 DEBUG [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:31,172 INFO [StoreOpener-ef5d5a618740d31b7e380adbd000f6da-1 {}] 
regionserver.HStore(327): Store=ef5d5a618740d31b7e380adbd000f6da/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:31,173 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for ef5d5a618740d31b7e380adbd000f6da 2024-12-10T16:34:31,173 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da 2024-12-10T16:34:31,176 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 2 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da 2024-12-10T16:34:31,176 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000003000 2024-12-10T16:34:31,179 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000003000: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:31,235 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=3000, path=hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000003000 2024-12-10T16:34:31,237 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000006000 2024-12-10T16:34:31,240 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000006000: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:31,296 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=3001, maxSequenceIdInLog=6000, path=hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000006000 2024-12-10T16:34:31,297 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing ef5d5a618740d31b7e380adbd000f6da 3/3 column families, dataSize=215.51 KB heapSize=657 KB 2024-12-10T16:34:31,326 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/.tmp/a/1103b31fb4c0461b90dd1c40761675a1 is 41, key is test2727/a:100/1733848470912/Put/seqid=0 2024-12-10T16:34:31,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741871_1048 (size=84227) 2024-12-10T16:34:31,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741871_1048 (size=84227) 2024-12-10T16:34:31,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741871_1048 (size=84227) 2024-12-10T16:34:31,335 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/.tmp/a/1103b31fb4c0461b90dd1c40761675a1 2024-12-10T16:34:31,366 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/.tmp/b/30e4bb663ea3471bbd564804cbd43026 is 41, key is test2727/b:100/1733848470955/Put/seqid=0 2024-12-10T16:34:31,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741872_1049 (size=84609) 2024-12-10T16:34:31,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741872_1049 (size=84609) 2024-12-10T16:34:31,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741872_1049 (size=84609) 2024-12-10T16:34:31,479 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-10T16:34:31,480 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-10T16:34:31,485 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-10T16:34:31,486 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-10T16:34:31,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-10T16:34:31,487 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-10T16:34:31,488 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL 2024-12-10T16:34:31,488 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL Metrics about Tables on a single HBase RegionServer 2024-12-10T16:34:31,775 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/.tmp/b/30e4bb663ea3471bbd564804cbd43026 2024-12-10T16:34:31,807 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/.tmp/c/25f053be8e184f4eb1a4636dad20913e is 41, key is test2727/c:100/1733848470999/Put/seqid=0 2024-12-10T16:34:31,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741873_1050 (size=84609) 2024-12-10T16:34:31,814 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741873_1050 (size=84609) 2024-12-10T16:34:31,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741873_1050 (size=84609) 2024-12-10T16:34:31,815 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/.tmp/c/25f053be8e184f4eb1a4636dad20913e 2024-12-10T16:34:31,822 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/.tmp/a/1103b31fb4c0461b90dd1c40761675a1 as hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/a/1103b31fb4c0461b90dd1c40761675a1 2024-12-10T16:34:31,830 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/a/1103b31fb4c0461b90dd1c40761675a1, entries=2000, sequenceid=6000, filesize=82.3 K 2024-12-10T16:34:31,831 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/.tmp/b/30e4bb663ea3471bbd564804cbd43026 as hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/b/30e4bb663ea3471bbd564804cbd43026 2024-12-10T16:34:31,839 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/b/30e4bb663ea3471bbd564804cbd43026, entries=2000, sequenceid=6000, filesize=82.6 K 2024-12-10T16:34:31,840 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/.tmp/c/25f053be8e184f4eb1a4636dad20913e as hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/c/25f053be8e184f4eb1a4636dad20913e 2024-12-10T16:34:31,847 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/c/25f053be8e184f4eb1a4636dad20913e, entries=2000, sequenceid=6000, filesize=82.6 K 2024-12-10T16:34:31,848 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for ef5d5a618740d31b7e380adbd000f6da in 552ms, sequenceid=6000, compaction requested=false; wal=null 2024-12-10T16:34:31,849 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000003000 2024-12-10T16:34:31,850 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/0000000000000006000 2024-12-10T16:34:31,851 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for ef5d5a618740d31b7e380adbd000f6da 2024-12-10T16:34:31,851 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for ef5d5a618740d31b7e380adbd000f6da 2024-12-10T16:34:31,852 DEBUG [Time-limited test {}] 
regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T16:34:31,854 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for ef5d5a618740d31b7e380adbd000f6da 2024-12-10T16:34:31,857 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/test2727/ef5d5a618740d31b7e380adbd000f6da/recovered.edits/6000.seqid, newMaxSeqId=6000, maxSeqId=1 2024-12-10T16:34:31,859 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened ef5d5a618740d31b7e380adbd000f6da; next sequenceid=6001; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61771087, jitterRate=-0.07953907549381256}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T16:34:31,860 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for ef5d5a618740d31b7e380adbd000f6da: Writing region info on filesystem at 1733848471165Initializing all the Stores at 1733848471166 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848471166Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848471166Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848471166Obtaining lock to block concurrent updates at 1733848471297 (+131 ms)Preparing flush snapshotting stores in ef5d5a618740d31b7e380adbd000f6da at 1733848471297Finished memstore snapshotting test2727,,1733848470547.ef5d5a618740d31b7e380adbd000f6da., syncing WAL and waiting on mvcc, flushsize=dataSize=220680, getHeapSize=672720, getOffHeapSize=0, getCellsCount=6000 at 1733848471297Flushing stores of test2727,,1733848470547.ef5d5a618740d31b7e380adbd000f6da. 
at 1733848471297Flushing ef5d5a618740d31b7e380adbd000f6da/a: creating writer at 1733848471297Flushing ef5d5a618740d31b7e380adbd000f6da/a: appending metadata at 1733848471325 (+28 ms)Flushing ef5d5a618740d31b7e380adbd000f6da/a: closing flushed file at 1733848471325Flushing ef5d5a618740d31b7e380adbd000f6da/b: creating writer at 1733848471343 (+18 ms)Flushing ef5d5a618740d31b7e380adbd000f6da/b: appending metadata at 1733848471364 (+21 ms)Flushing ef5d5a618740d31b7e380adbd000f6da/b: closing flushed file at 1733848471364Flushing ef5d5a618740d31b7e380adbd000f6da/c: creating writer at 1733848471787 (+423 ms)Flushing ef5d5a618740d31b7e380adbd000f6da/c: appending metadata at 1733848471806 (+19 ms)Flushing ef5d5a618740d31b7e380adbd000f6da/c: closing flushed file at 1733848471806Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5eab3884: reopening flushed file at 1733848471821 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f2ca6e2: reopening flushed file at 1733848471830 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@69c8fa37: reopening flushed file at 1733848471839 (+9 ms)Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for ef5d5a618740d31b7e380adbd000f6da in 552ms, sequenceid=6000, compaction requested=false; wal=null at 1733848471848 (+9 ms)Cleaning up temporary data from old regions at 1733848471851 (+3 ms)Region opened successfully at 1733848471860 (+9 ms) 2024-12-10T16:34:31,862 DEBUG [Time-limited test {}] wal.AbstractTestWALReplay(320): region.getOpenSeqNum(): 6001, wal3.id: 0 2024-12-10T16:34:31,862 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing ef5d5a618740d31b7e380adbd000f6da, disabling compactions & flushes 2024-12-10T16:34:31,862 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1733848470547.ef5d5a618740d31b7e380adbd000f6da. 2024-12-10T16:34:31,862 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1733848470547.ef5d5a618740d31b7e380adbd000f6da. 2024-12-10T16:34:31,862 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1733848470547.ef5d5a618740d31b7e380adbd000f6da. after waiting 0 ms 2024-12-10T16:34:31,862 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1733848470547.ef5d5a618740d31b7e380adbd000f6da. 2024-12-10T16:34:31,864 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1733848470547.ef5d5a618740d31b7e380adbd000f6da. 
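The flush summary above reports dataSize ~215.51 KB/220680 and heapSize ~656.95 KB/672720 for the 6000 replayed cells, and each of the three stores (a, b, c) reports a flushed size of 71.84 KB with entries=2000. A small stand-alone arithmetic check of those numbers follows (plain Java, not HBase code; the values are copied from the records above).

// Back-of-the-envelope check of the flush numbers in the preceding records.
public class FlushMath {
  public static void main(String[] args) {
    long dataBytes = 220_680;  // "Finished flush of dataSize ~215.51 KB/220680"
    long heapBytes = 672_720;  // "heapSize ~656.95 KB/672720"
    int  cells     = 6_000;    // 3000 edits from recovered.edits/...3000 plus 3000 from ...6000
    int  families  = 3;        // column families a, b and c

    System.out.printf("total data : %.2f KB%n", dataBytes / 1024.0);   // 215.51 KB
    System.out.printf("total heap : %.2f KB%n", heapBytes / 1024.0);   // 656.95 KB
    System.out.printf("per family : %.2f KB, %d entries%n",
        dataBytes / (double) families / 1024.0, cells / families);     // 71.84 KB, 2000 entries
  }
}

The per-family figures line up with the three hfiles committed above (entries=2000, sequenceid=6000 each), so the single flush performed after replay accounts for every edit recovered from the two split WALs.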
2024-12-10T16:34:31,864 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for ef5d5a618740d31b7e380adbd000f6da: Waiting for close lock at 1733848471862Disabling compacts and flushes for region at 1733848471862Disabling writes for close at 1733848471862Writing region close event to WAL at 1733848471864 (+2 ms)Closed at 1733848471864 2024-12-10T16:34:31,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741870_1047 (size=95) 2024-12-10T16:34:31,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741870_1047 (size=95) 2024-12-10T16:34:31,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741870_1047 (size=95) 2024-12-10T16:34:31,871 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T16:34:31,871 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733848471136) 2024-12-10T16:34:31,885 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#test2727 Thread=406 (was 403) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:59816 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:60050 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:34490 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:54802 [Waiting for operation #10] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=929 (was 865) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=183 (was 183), ProcessCount=11 (was 11), AvailableMemoryMB=4992 (was 5198) 2024-12-10T16:34:31,898 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testSequentialEditLogSeqNum Thread=406, OpenFileDescriptor=929, MaxFileDescriptor=1048576, SystemLoadAverage=183, ProcessCount=11, AvailableMemoryMB=4990 2024-12-10T16:34:31,921 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:31,928 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733848471920, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:31,929 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor wal.1733848471928 2024-12-10T16:34:31,937 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733848471920/wal.1733848471928 2024-12-10T16:34:31,941 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new MockWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:42633:42633)] 2024-12-10T16:34:31,943 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 96591f769a327e58d45e8036de8f960d, NAME => 'testSequentialEditLogSeqNum,,1733848471921.96591f769a327e58d45e8036de8f960d.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:31,944 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testSequentialEditLogSeqNum,,1733848471921.96591f769a327e58d45e8036de8f960d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:31,944 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 96591f769a327e58d45e8036de8f960d 2024-12-10T16:34:31,944 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 96591f769a327e58d45e8036de8f960d 2024-12-10T16:34:31,945 WARN [Time-limited test {}] regionserver.HRegionFileSystem(836): hdfs://localhost:35477/hbase/data/default/testSequentialEditLogSeqNum/96591f769a327e58d45e8036de8f960d doesn't exist for region: 96591f769a327e58d45e8036de8f960d on table testSequentialEditLogSeqNum 2024-12-10T16:34:31,946 WARN [Time-limited test {}] regionserver.HRegionFileSystem(854): .regioninfo file not found for region: 96591f769a327e58d45e8036de8f960d on table testSequentialEditLogSeqNum 2024-12-10T16:34:31,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741875_1052 (size=62) 2024-12-10T16:34:31,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741875_1052 (size=62) 2024-12-10T16:34:31,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741875_1052 (size=62) 2024-12-10T16:34:31,960 INFO [StoreOpener-96591f769a327e58d45e8036de8f960d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family a of region 96591f769a327e58d45e8036de8f960d 2024-12-10T16:34:31,962 INFO [StoreOpener-96591f769a327e58d45e8036de8f960d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 96591f769a327e58d45e8036de8f960d columnFamilyName a 2024-12-10T16:34:31,962 DEBUG [StoreOpener-96591f769a327e58d45e8036de8f960d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:31,963 INFO [StoreOpener-96591f769a327e58d45e8036de8f960d-1 {}] regionserver.HStore(327): Store=96591f769a327e58d45e8036de8f960d/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:31,963 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 96591f769a327e58d45e8036de8f960d 2024-12-10T16:34:31,964 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testSequentialEditLogSeqNum/96591f769a327e58d45e8036de8f960d 2024-12-10T16:34:31,964 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testSequentialEditLogSeqNum/96591f769a327e58d45e8036de8f960d 2024-12-10T16:34:31,965 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 96591f769a327e58d45e8036de8f960d 2024-12-10T16:34:31,965 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 96591f769a327e58d45e8036de8f960d 2024-12-10T16:34:31,967 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 96591f769a327e58d45e8036de8f960d 2024-12-10T16:34:31,970 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/testSequentialEditLogSeqNum/96591f769a327e58d45e8036de8f960d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T16:34:31,971 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 96591f769a327e58d45e8036de8f960d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67341313, jitterRate=0.003463760018348694}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T16:34:31,972 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 96591f769a327e58d45e8036de8f960d: Writing region info on filesystem at 1733848471944Initializing all the Stores at 1733848471959 (+15 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848471959Cleaning up temporary data from old regions at 1733848471965 (+6 ms)Region opened successfully at 1733848471972 (+7 ms) 2024-12-10T16:34:31,986 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 96591f769a327e58d45e8036de8f960d 1/1 column families, dataSize=770 B heapSize=1.73 KB 2024-12-10T16:34:32,013 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testSequentialEditLogSeqNum/96591f769a327e58d45e8036de8f960d/.tmp/a/df0a87e577ae4ff1a40fc4545b3f60e9 is 81, key is testSequentialEditLogSeqNum/a:x0/1733848471972/Put/seqid=0 2024-12-10T16:34:32,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741876_1053 (size=5833) 2024-12-10T16:34:32,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741876_1053 (size=5833) 2024-12-10T16:34:32,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741876_1053 (size=5833) 2024-12-10T16:34:32,026 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=770 B at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testSequentialEditLogSeqNum/96591f769a327e58d45e8036de8f960d/.tmp/a/df0a87e577ae4ff1a40fc4545b3f60e9 2024-12-10T16:34:32,037 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testSequentialEditLogSeqNum/96591f769a327e58d45e8036de8f960d/.tmp/a/df0a87e577ae4ff1a40fc4545b3f60e9 as hdfs://localhost:35477/hbase/data/default/testSequentialEditLogSeqNum/96591f769a327e58d45e8036de8f960d/a/df0a87e577ae4ff1a40fc4545b3f60e9 2024-12-10T16:34:32,046 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testSequentialEditLogSeqNum/96591f769a327e58d45e8036de8f960d/a/df0a87e577ae4ff1a40fc4545b3f60e9, entries=10, sequenceid=13, filesize=5.7 K 2024-12-10T16:34:32,048 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~770 B/770, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 96591f769a327e58d45e8036de8f960d in 62ms, sequenceid=13, compaction requested=false 2024-12-10T16:34:32,049 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 96591f769a327e58d45e8036de8f960d: 2024-12-10T16:34:32,057 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T16:34:32,057 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T16:34:32,057 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T16:34:32,057 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T16:34:32,058 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T16:34:32,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741874_1051 (size=1845) 2024-12-10T16:34:32,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741874_1051 (size=1845) 2024-12-10T16:34:32,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741874_1051 (size=1845) 2024-12-10T16:34:32,081 INFO [Time-limited 
test {}] wal.WALSplitter(299): Splitting hdfs://localhost:35477/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733848471920/wal.1733848471928, size=1.8 K (1845bytes) 2024-12-10T16:34:32,081 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35477/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733848471920/wal.1733848471928 2024-12-10T16:34:32,082 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35477/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733848471920/wal.1733848471928 after 0ms 2024-12-10T16:34:32,085 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733848471920/wal.1733848471928: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:32,086 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:35477/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733848471920/wal.1733848471928 took 5ms 2024-12-10T16:34:32,088 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:35477/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733848471920/wal.1733848471928 so closing down 2024-12-10T16:34:32,088 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T16:34:32,089 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733848471928.temp 2024-12-10T16:34:32,091 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testSequentialEditLogSeqNum/96591f769a327e58d45e8036de8f960d/recovered.edits/0000000000000000003-wal.1733848471928.temp 2024-12-10T16:34:32,092 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T16:34:32,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741877_1054 (size=1477) 2024-12-10T16:34:32,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741877_1054 (size=1477) 2024-12-10T16:34:32,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741877_1054 (size=1477) 2024-12-10T16:34:32,103 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testSequentialEditLogSeqNum/96591f769a327e58d45e8036de8f960d/recovered.edits/0000000000000000003-wal.1733848471928.temp (wrote 15 edits, skipped 0 edits in 0 ms) 2024-12-10T16:34:32,105 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:35477/hbase/data/default/testSequentialEditLogSeqNum/96591f769a327e58d45e8036de8f960d/recovered.edits/0000000000000000003-wal.1733848471928.temp to hdfs://localhost:35477/hbase/data/default/testSequentialEditLogSeqNum/96591f769a327e58d45e8036de8f960d/recovered.edits/0000000000000000020 2024-12-10T16:34:32,105 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 17 edits across 1 Regions in 19 ms; skipped=2; 
WAL=hdfs://localhost:35477/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733848471920/wal.1733848471928, size=1.8 K, length=1845, corrupted=false, cancelled=false 2024-12-10T16:34:32,105 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:35477/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733848471920/wal.1733848471928, journal: Splitting hdfs://localhost:35477/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733848471920/wal.1733848471928, size=1.8 K (1845bytes) at 1733848472081Finishing writing output for hdfs://localhost:35477/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733848471920/wal.1733848471928 so closing down at 1733848472088 (+7 ms)Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testSequentialEditLogSeqNum/96591f769a327e58d45e8036de8f960d/recovered.edits/0000000000000000003-wal.1733848471928.temp at 1733848472091 (+3 ms)3 split writer threads finished at 1733848472092 (+1 ms)Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testSequentialEditLogSeqNum/96591f769a327e58d45e8036de8f960d/recovered.edits/0000000000000000003-wal.1733848471928.temp (wrote 15 edits, skipped 0 edits in 0 ms) at 1733848472103 (+11 ms)Rename recovered edits hdfs://localhost:35477/hbase/data/default/testSequentialEditLogSeqNum/96591f769a327e58d45e8036de8f960d/recovered.edits/0000000000000000003-wal.1733848471928.temp to hdfs://localhost:35477/hbase/data/default/testSequentialEditLogSeqNum/96591f769a327e58d45e8036de8f960d/recovered.edits/0000000000000000020 at 1733848472105 (+2 ms)Processed 17 edits across 1 Regions in 19 ms; skipped=2; WAL=hdfs://localhost:35477/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1733848471920/wal.1733848471928, size=1.8 K, length=1845, corrupted=false, cancelled=false at 1733848472105 2024-12-10T16:34:32,118 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testSequentialEditLogSeqNum Thread=411 (was 406) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:60050 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:34490 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:54802 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=967 (was 929) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=183 (was 183), ProcessCount=11 (was 11), AvailableMemoryMB=4966 (was 4990) 2024-12-10T16:34:32,131 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testRegionMadeOfBulkLoadedFilesOnly Thread=411, OpenFileDescriptor=967, MaxFileDescriptor=1048576, SystemLoadAverage=183, ProcessCount=11, AvailableMemoryMB=4964 2024-12-10T16:34:32,169 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:32,172 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:32,209 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T16:34:32,215 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-57348110, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/hregion-57348110, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:32,231 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-57348110/hregion-57348110.1733848472216, exclude list is [], retry=0 2024-12-10T16:34:32,234 DEBUG [AsyncFSWAL-17-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:32,235 DEBUG [AsyncFSWAL-17-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:32,235 DEBUG [AsyncFSWAL-17-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:32,238 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-57348110/hregion-57348110.1733848472216 2024-12-10T16:34:32,239 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:36795:36795)] 2024-12-10T16:34:32,239 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => e7f55858225ad537627d3c9492d20f33, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1733848472170.e7f55858225ad537627d3c9492d20f33.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testRegionMadeOfBulkLoadedFilesOnly', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, 
{NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35477/hbase 2024-12-10T16:34:32,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741879_1056 (size=70) 2024-12-10T16:34:32,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741879_1056 (size=70) 2024-12-10T16:34:32,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741879_1056 (size=70) 2024-12-10T16:34:32,249 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1733848472170.e7f55858225ad537627d3c9492d20f33.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:32,251 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:32,252 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e7f55858225ad537627d3c9492d20f33 columnFamilyName a 2024-12-10T16:34:32,252 DEBUG [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:32,253 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] regionserver.HStore(327): Store=e7f55858225ad537627d3c9492d20f33/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:32,253 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:32,254 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; 
major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e7f55858225ad537627d3c9492d20f33 columnFamilyName b 2024-12-10T16:34:32,254 DEBUG [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:32,255 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] regionserver.HStore(327): Store=e7f55858225ad537627d3c9492d20f33/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:32,255 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:32,256 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e7f55858225ad537627d3c9492d20f33 columnFamilyName c 2024-12-10T16:34:32,256 DEBUG [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:32,257 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] regionserver.HStore(327): Store=e7f55858225ad537627d3c9492d20f33/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:32,257 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:32,258 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:32,258 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:32,260 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:32,260 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 
e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:32,260 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T16:34:32,261 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:32,264 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T16:34:32,265 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened e7f55858225ad537627d3c9492d20f33; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64371800, jitterRate=-0.04078543186187744}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T16:34:32,266 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for e7f55858225ad537627d3c9492d20f33: Writing region info on filesystem at 1733848472249Initializing all the Stores at 1733848472250 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848472250Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848472250Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848472250Cleaning up temporary data from old regions at 1733848472260 (+10 ms)Region opened successfully at 1733848472266 (+6 ms) 2024-12-10T16:34:32,266 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing e7f55858225ad537627d3c9492d20f33, disabling compactions & flushes 2024-12-10T16:34:32,266 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1733848472170.e7f55858225ad537627d3c9492d20f33. 2024-12-10T16:34:32,266 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733848472170.e7f55858225ad537627d3c9492d20f33. 2024-12-10T16:34:32,266 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733848472170.e7f55858225ad537627d3c9492d20f33. after waiting 0 ms 2024-12-10T16:34:32,266 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1733848472170.e7f55858225ad537627d3c9492d20f33. 
2024-12-10T16:34:32,267 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1733848472170.e7f55858225ad537627d3c9492d20f33. 2024-12-10T16:34:32,267 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for e7f55858225ad537627d3c9492d20f33: Waiting for close lock at 1733848472266Disabling compacts and flushes for region at 1733848472266Disabling writes for close at 1733848472266Writing region close event to WAL at 1733848472267 (+1 ms)Closed at 1733848472267 2024-12-10T16:34:32,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741878_1055 (size=95) 2024-12-10T16:34:32,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741878_1055 (size=95) 2024-12-10T16:34:32,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741878_1055 (size=95) 2024-12-10T16:34:32,272 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T16:34:32,272 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-57348110:(num 1733848472216) 2024-12-10T16:34:32,273 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T16:34:32,275 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:32,289 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848472276, exclude list is [], retry=0 2024-12-10T16:34:32,293 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:32,293 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:32,294 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:32,296 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848472276 2024-12-10T16:34:32,300 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:36795:36795)] 2024-12-10T16:34:32,300 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => e7f55858225ad537627d3c9492d20f33, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1733848472170.e7f55858225ad537627d3c9492d20f33.', STARTKEY => '', ENDKEY => ''} 
2024-12-10T16:34:32,300 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1733848472170.e7f55858225ad537627d3c9492d20f33.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:32,300 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:32,300 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:32,302 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:32,303 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e7f55858225ad537627d3c9492d20f33 columnFamilyName a 2024-12-10T16:34:32,303 DEBUG [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:32,304 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] regionserver.HStore(327): Store=e7f55858225ad537627d3c9492d20f33/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:32,304 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:32,305 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e7f55858225ad537627d3c9492d20f33 columnFamilyName b 2024-12-10T16:34:32,305 DEBUG [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:32,306 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] regionserver.HStore(327): Store=e7f55858225ad537627d3c9492d20f33/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:32,306 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:32,307 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e7f55858225ad537627d3c9492d20f33 columnFamilyName c 2024-12-10T16:34:32,307 DEBUG [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:32,308 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] regionserver.HStore(327): Store=e7f55858225ad537627d3c9492d20f33/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:32,308 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:32,309 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:32,310 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:32,311 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:32,311 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:32,311 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
2024-12-10T16:34:32,313 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:32,314 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened e7f55858225ad537627d3c9492d20f33; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74647163, jitterRate=0.1123294085264206}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T16:34:32,314 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for e7f55858225ad537627d3c9492d20f33: Writing region info on filesystem at 1733848472301Initializing all the Stores at 1733848472302 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848472302Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848472302Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848472302Cleaning up temporary data from old regions at 1733848472311 (+9 ms)Region opened successfully at 1733848472314 (+3 ms) 2024-12-10T16:34:32,319 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile is 28, key is \x0D/a:a/1733848472318/Put/seqid=0 2024-12-10T16:34:32,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741881_1058 (size=4826) 2024-12-10T16:34:32,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741881_1058 (size=4826) 2024-12-10T16:34:32,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741881_1058 (size=4826) 2024-12-10T16:34:32,330 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:35477/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile for inclusion in e7f55858225ad537627d3c9492d20f33/a 2024-12-10T16:34:32,340 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first= last=z 2024-12-10T16:34:32,340 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-10T16:34:32,340 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for e7f55858225ad537627d3c9492d20f33: 2024-12-10T16:34:32,342 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile as 
hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/a/6cc6c3d4f19e4f4899fdd9b7572983fc_SeqId_3_ 2024-12-10T16:34:32,343 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:35477/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into e7f55858225ad537627d3c9492d20f33/a as hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/a/6cc6c3d4f19e4f4899fdd9b7572983fc_SeqId_3_ - updating store file list. 2024-12-10T16:34:32,349 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 6cc6c3d4f19e4f4899fdd9b7572983fc_SeqId_3_: NONE, but ROW specified in column family configuration 2024-12-10T16:34:32,349 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/a/6cc6c3d4f19e4f4899fdd9b7572983fc_SeqId_3_ into e7f55858225ad537627d3c9492d20f33/a 2024-12-10T16:34:32,349 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:35477/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into e7f55858225ad537627d3c9492d20f33/a (new location: hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/a/6cc6c3d4f19e4f4899fdd9b7572983fc_SeqId_3_) 2024-12-10T16:34:32,397 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:35477/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848472276, size=0 (0bytes) 2024-12-10T16:34:32,398 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:35477/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848472276 might be still open, length is 0 2024-12-10T16:34:32,398 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35477/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848472276 2024-12-10T16:34:32,398 WARN [IPC Server handler 1 on default port 35477 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848472276 has not been closed. Lease recovery is in progress. RecoveryId = 1059 for block blk_1073741880_1057 2024-12-10T16:34:32,399 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35477/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848472276 after 1ms 2024-12-10T16:34:35,321 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:54994 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:42039:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54994 dst: /127.0.0.1:42039 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:42039 remote=/127.0.0.1:54994]. Total timeout mills is 60000, 57035 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T16:34:35,322 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:34588 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:43913:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34588 dst: /127.0.0.1:43913 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T16:34:35,323 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:59950 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:46873:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59950 dst: /127.0.0.1:46873 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T16:34:35,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741880_1059 (size=473) 2024-12-10T16:34:35,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741880_1059 (size=473) 2024-12-10T16:34:36,400 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35477/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848472276 after 4002ms 2024-12-10T16:34:36,404 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848472276: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:36,404 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:35477/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848472276 took 4007ms 2024-12-10T16:34:36,407 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:35477/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848472276; continuing. 
2024-12-10T16:34:36,407 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:35477/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848472276 so closing down 2024-12-10T16:34:36,407 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T16:34:36,410 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000005-wal.1733848472276.temp 2024-12-10T16:34:36,412 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/recovered.edits/0000000000000000005-wal.1733848472276.temp 2024-12-10T16:34:36,413 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T16:34:36,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741882_1060 (size=259) 2024-12-10T16:34:36,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741882_1060 (size=259) 2024-12-10T16:34:36,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741882_1060 (size=259) 2024-12-10T16:34:36,419 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/recovered.edits/0000000000000000005-wal.1733848472276.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-12-10T16:34:36,420 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/recovered.edits/0000000000000000005-wal.1733848472276.temp to hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/recovered.edits/0000000000000000005 2024-12-10T16:34:36,420 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 15 ms; skipped=1; WAL=hdfs://localhost:35477/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848472276, size=0, length=0, corrupted=false, cancelled=false 2024-12-10T16:34:36,421 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:35477/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848472276, journal: Splitting hdfs://localhost:35477/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848472276, size=0 (0bytes) at 1733848472397Finishing writing output for hdfs://localhost:35477/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848472276 so closing down at 1733848476407 (+4010 ms)Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/recovered.edits/0000000000000000005-wal.1733848472276.temp at 1733848476412 (+5 ms)3 split writer threads finished at 1733848476413 (+1 ms)Closed recovered edits writer 
path=hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/recovered.edits/0000000000000000005-wal.1733848472276.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1733848476419 (+6 ms)Rename recovered edits hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/recovered.edits/0000000000000000005-wal.1733848472276.temp to hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/recovered.edits/0000000000000000005 at 1733848476420 (+1 ms)Processed 2 edits across 1 Regions in 15 ms; skipped=1; WAL=hdfs://localhost:35477/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848472276, size=0, length=0, corrupted=false, cancelled=false at 1733848476421 (+1 ms) 2024-12-10T16:34:36,422 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:35477/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848472276 to hdfs://localhost:35477/hbase/oldWALs/wal.1733848472276 2024-12-10T16:34:36,423 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/recovered.edits/0000000000000000005 2024-12-10T16:34:36,423 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T16:34:36,425 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:36,437 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848476425, exclude list is [], retry=0 2024-12-10T16:34:36,440 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:36,440 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:36,440 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:36,442 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848476425 2024-12-10T16:34:36,442 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:36795:36795)] 2024-12-10T16:34:36,442 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => e7f55858225ad537627d3c9492d20f33, NAME => 
'testRegionMadeOfBulkLoadedFilesOnly,,1733848472170.e7f55858225ad537627d3c9492d20f33.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:36,443 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1733848472170.e7f55858225ad537627d3c9492d20f33.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:36,443 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:36,443 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:36,444 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:36,445 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e7f55858225ad537627d3c9492d20f33 columnFamilyName a 2024-12-10T16:34:36,445 DEBUG [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:36,451 DEBUG [StoreFileOpener-e7f55858225ad537627d3c9492d20f33-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 6cc6c3d4f19e4f4899fdd9b7572983fc_SeqId_3_: NONE, but ROW specified in column family configuration 2024-12-10T16:34:36,452 DEBUG [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/a/6cc6c3d4f19e4f4899fdd9b7572983fc_SeqId_3_ 2024-12-10T16:34:36,452 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] regionserver.HStore(327): Store=e7f55858225ad537627d3c9492d20f33/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:36,452 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:36,453 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e7f55858225ad537627d3c9492d20f33 columnFamilyName b 2024-12-10T16:34:36,454 DEBUG [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:36,454 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] regionserver.HStore(327): Store=e7f55858225ad537627d3c9492d20f33/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:36,454 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:36,455 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e7f55858225ad537627d3c9492d20f33 columnFamilyName c 2024-12-10T16:34:36,455 DEBUG [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:36,455 INFO [StoreOpener-e7f55858225ad537627d3c9492d20f33-1 {}] regionserver.HStore(327): Store=e7f55858225ad537627d3c9492d20f33/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:36,456 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:36,456 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:36,458 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:36,458 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from 
hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/recovered.edits/0000000000000000005 2024-12-10T16:34:36,460 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/recovered.edits/0000000000000000005: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:36,461 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 1, skipped 0, firstSequenceIdInLog=5, maxSequenceIdInLog=5, path=hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/recovered.edits/0000000000000000005 2024-12-10T16:34:36,461 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing e7f55858225ad537627d3c9492d20f33 3/3 column families, dataSize=58 B heapSize=904 B 2024-12-10T16:34:36,476 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/.tmp/a/09f784fd8ba8485ab059283d92173f2e is 62, key is testRegionMadeOfBulkLoadedFilesOnly/a:a/1733848472355/Put/seqid=0 2024-12-10T16:34:36,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741884_1062 (size=5149) 2024-12-10T16:34:36,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741884_1062 (size=5149) 2024-12-10T16:34:36,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741884_1062 (size=5149) 2024-12-10T16:34:36,482 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/.tmp/a/09f784fd8ba8485ab059283d92173f2e 2024-12-10T16:34:36,487 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/.tmp/a/09f784fd8ba8485ab059283d92173f2e as hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/a/09f784fd8ba8485ab059283d92173f2e 2024-12-10T16:34:36,493 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/a/09f784fd8ba8485ab059283d92173f2e, entries=1, sequenceid=5, filesize=5.0 K 2024-12-10T16:34:36,493 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for e7f55858225ad537627d3c9492d20f33 in 32ms, sequenceid=5, compaction requested=false; wal=null 2024-12-10T16:34:36,494 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/recovered.edits/0000000000000000005 2024-12-10T16:34:36,495 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:36,495 DEBUG [Time-limited 
test {}] regionserver.HRegion(1060): Cleaning up temporary data for e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:36,496 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T16:34:36,497 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for e7f55858225ad537627d3c9492d20f33 2024-12-10T16:34:36,499 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/e7f55858225ad537627d3c9492d20f33/recovered.edits/5.seqid, newMaxSeqId=5, maxSeqId=1 2024-12-10T16:34:36,500 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened e7f55858225ad537627d3c9492d20f33; next sequenceid=6; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71195558, jitterRate=0.060896486043930054}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T16:34:36,500 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for e7f55858225ad537627d3c9492d20f33: Writing region info on filesystem at 1733848476443Initializing all the Stores at 1733848476444 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848476444Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848476444Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848476444Obtaining lock to block concurrent updates at 1733848476461 (+17 ms)Preparing flush snapshotting stores in e7f55858225ad537627d3c9492d20f33 at 1733848476461Finished memstore snapshotting testRegionMadeOfBulkLoadedFilesOnly,,1733848472170.e7f55858225ad537627d3c9492d20f33., syncing WAL and waiting on mvcc, flushsize=dataSize=58, getHeapSize=856, getOffHeapSize=0, getCellsCount=1 at 1733848476461Flushing stores of testRegionMadeOfBulkLoadedFilesOnly,,1733848472170.e7f55858225ad537627d3c9492d20f33. 
at 1733848476461Flushing e7f55858225ad537627d3c9492d20f33/a: creating writer at 1733848476461Flushing e7f55858225ad537627d3c9492d20f33/a: appending metadata at 1733848476475 (+14 ms)Flushing e7f55858225ad537627d3c9492d20f33/a: closing flushed file at 1733848476475Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@57e5b7ba: reopening flushed file at 1733848476486 (+11 ms)Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for e7f55858225ad537627d3c9492d20f33 in 32ms, sequenceid=5, compaction requested=false; wal=null at 1733848476493 (+7 ms)Cleaning up temporary data from old regions at 1733848476495 (+2 ms)Region opened successfully at 1733848476500 (+5 ms) 2024-12-10T16:34:36,504 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing e7f55858225ad537627d3c9492d20f33, disabling compactions & flushes 2024-12-10T16:34:36,504 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1733848472170.e7f55858225ad537627d3c9492d20f33. 2024-12-10T16:34:36,504 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733848472170.e7f55858225ad537627d3c9492d20f33. 2024-12-10T16:34:36,504 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1733848472170.e7f55858225ad537627d3c9492d20f33. after waiting 0 ms 2024-12-10T16:34:36,504 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1733848472170.e7f55858225ad537627d3c9492d20f33. 2024-12-10T16:34:36,505 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1733848472170.e7f55858225ad537627d3c9492d20f33. 
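The replay above reads recovered.edits/0000000000000000005 with a compression context reporting hasValueCompression=true and valueCompressionType=GZ, which is the WAL value compression this suite (TestAsyncWALReplayValueCompression) exercises. As a rough sketch only, assuming property names recalled from memory rather than taken from this log (verify them against CompressionContext in the HBase source), a configuration that would produce such a WAL reader might look like:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalValueCompressionConfigSketch {
      // Returns a Configuration with WAL compression switched on.
      // The three property names below are assumptions, not read from this log.
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Long-standing WAL dictionary compression toggle.
        conf.setBoolean("hbase.regionserver.wal.enablecompression", true);
        // Assumed toggle for WAL value compression (hasValueCompression=true above).
        conf.setBoolean("hbase.regionserver.wal.value.compression", true);
        // Assumed codec selector (valueCompressionType=GZ above).
        conf.set("hbase.regionserver.wal.value.compression.type", "GZ");
        return conf;
      }
    }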
2024-12-10T16:34:36,505 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for e7f55858225ad537627d3c9492d20f33: Waiting for close lock at 1733848476504Disabling compacts and flushes for region at 1733848476504Disabling writes for close at 1733848476504Writing region close event to WAL at 1733848476505 (+1 ms)Closed at 1733848476505 2024-12-10T16:34:36,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741883_1061 (size=95) 2024-12-10T16:34:36,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741883_1061 (size=95) 2024-12-10T16:34:36,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741883_1061 (size=95) 2024-12-10T16:34:36,511 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T16:34:36,511 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733848476425) 2024-12-10T16:34:36,523 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testRegionMadeOfBulkLoadedFilesOnly Thread=414 (was 411) Potentially hanging thread: AsyncFSWAL-17-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestRegionMadeOfBulkLoadedFilesOnly@localhost:35477 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1917388039_22 at /127.0.0.1:59994 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:35477 from jenkinstestRegionMadeOfBulkLoadedFilesOnly java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1917388039_22 at /127.0.0.1:55020 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1024 (was 967) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=176 (was 183), ProcessCount=11 (was 11), AvailableMemoryMB=4918 (was 4964) 2024-12-10T16:34:36,535 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterRegionMovedWithMultiCF Thread=414, OpenFileDescriptor=1024, MaxFileDescriptor=1048576, SystemLoadAverage=176, ProcessCount=11, AvailableMemoryMB=4917 2024-12-10T16:34:36,548 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:36,552 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-10T16:34:36,556 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 4b7737f37de9,42829,1733848461149 2024-12-10T16:34:36,558 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@b136694 2024-12-10T16:34:36,558 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T16:34:36,560 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41624, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T16:34:36,564 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42829 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T16:34:36,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42829 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF 2024-12-10T16:34:36,572 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T16:34:36,574 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42829 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testReplayEditsAfterRegionMovedWithMultiCF" procId is: 4 2024-12-10T16:34:36,574 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:36,576 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T16:34:36,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42829 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T16:34:36,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741885_1063 (size=694) 2024-12-10T16:34:36,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741885_1063 (size=694) 2024-12-10T16:34:36,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741885_1063 (size=694) 2024-12-10T16:34:36,589 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3c4a0fab46c2c0263b38e45209989070, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f 2024-12-10T16:34:36,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741886_1064 (size=77) 2024-12-10T16:34:36,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741886_1064 (size=77) 2024-12-10T16:34:36,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741886_1064 (size=77) 2024-12-10T16:34:36,600 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:36,600 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1722): Closing 3c4a0fab46c2c0263b38e45209989070, disabling compactions & flushes 2024-12-10T16:34:36,600 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 
2024-12-10T16:34:36,600 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:36,600 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. after waiting 0 ms 2024-12-10T16:34:36,600 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:36,600 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:36,600 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3c4a0fab46c2c0263b38e45209989070: Waiting for close lock at 1733848476600Disabling compacts and flushes for region at 1733848476600Disabling writes for close at 1733848476600Writing region close event to WAL at 1733848476600Closed at 1733848476600 2024-12-10T16:34:36,602 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T16:34:36,607 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.","families":{"info":[{"qualifier":"regioninfo","vlen":76,"tag":[],"timestamp":"1733848476602"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733848476602"}]},"ts":"1733848476602"} 2024-12-10T16:34:36,611 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
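The CreateTableProcedure entries above (pid=4) lay out the filesystem and meta row for testReplayEditsAfterRegionMovedWithMultiCF with two column families, cf1 and cf2, each with VERSIONS => '1'. For illustration only, and not taken from the test source, an equivalent client-side request could be issued roughly like this:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMultiCfTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("cf1")).setMaxVersions(1).build())
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("cf2")).setMaxVersions(1).build())
              .build();
          // Synchronous create: returns once the CreateTableProcedure (pid=4 above) finishes,
          // which is why the client keeps polling "Checking to see if procedure is done pid=4".
          admin.createTable(desc);
        }
      }
    }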
2024-12-10T16:34:36,612 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T16:34:36,615 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733848476613"}]},"ts":"1733848476613"} 2024-12-10T16:34:36,619 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLING in hbase:meta 2024-12-10T16:34:36,619 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {4b7737f37de9=0} racks are {/default-rack=0} 2024-12-10T16:34:36,621 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-10T16:34:36,621 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-10T16:34:36,621 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-10T16:34:36,621 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-10T16:34:36,621 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-10T16:34:36,621 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-10T16:34:36,621 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-10T16:34:36,621 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-10T16:34:36,621 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-10T16:34:36,621 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T16:34:36,622 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, ASSIGN}] 2024-12-10T16:34:36,625 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, ASSIGN 2024-12-10T16:34:36,626 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, ASSIGN; state=OFFLINE, location=4b7737f37de9,35753,1733848461832; forceNewPlan=false, retain=false 2024-12-10T16:34:36,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42829 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T16:34:36,780 INFO [4b7737f37de9:42829 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
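The ASSIGN subprocedure created above (pid=5) will transition the new region to OPEN on one of the three servers the balancer just enumerated. A test normally blocks until that assignment completes, as the HBaseTestingUtil entries later in this log do; a minimal sketch, assuming helper method names recalled from the HBase test utility API rather than from this log:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;

    public class WaitForAssignmentSketch {
      // Blocks a test until every region of the new table is assigned and usable.
      // Both method names are assumptions based on the HBase test utility API.
      static void awaitTable(HBaseTestingUtil util) throws Exception {
        TableName tn = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
        util.waitUntilAllRegionsAssigned(tn);
        util.waitTableAvailable(tn);
      }
    }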
2024-12-10T16:34:36,782 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3c4a0fab46c2c0263b38e45209989070, regionState=OPENING, regionLocation=4b7737f37de9,35753,1733848461832 2024-12-10T16:34:36,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, ASSIGN because future has completed 2024-12-10T16:34:36,787 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3c4a0fab46c2c0263b38e45209989070, server=4b7737f37de9,35753,1733848461832}] 2024-12-10T16:34:36,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42829 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T16:34:36,941 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T16:34:36,943 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51231, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T16:34:36,948 INFO [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:36,948 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 3c4a0fab46c2c0263b38e45209989070, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:36,948 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:36,948 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:36,949 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:36,949 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:36,950 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:36,952 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c4a0fab46c2c0263b38e45209989070 columnFamilyName cf1 2024-12-10T16:34:36,952 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:36,952 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(327): Store=3c4a0fab46c2c0263b38e45209989070/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:36,953 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:36,955 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c4a0fab46c2c0263b38e45209989070 columnFamilyName cf2 2024-12-10T16:34:36,955 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:36,956 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(327): Store=3c4a0fab46c2c0263b38e45209989070/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:36,956 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:36,957 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:36,957 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:36,958 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:36,958 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:36,959 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-10T16:34:36,960 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:36,962 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T16:34:36,963 INFO [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 3c4a0fab46c2c0263b38e45209989070; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73467346, jitterRate=0.09474876523017883}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-10T16:34:36,963 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:36,963 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 3c4a0fab46c2c0263b38e45209989070: Running coprocessor pre-open hook at 1733848476949Writing region info on filesystem at 1733848476949Initializing all the Stores at 1733848476950 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848476950Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848476950Cleaning up temporary data from old regions at 1733848476958 (+8 ms)Running coprocessor post-open hooks at 1733848476963 (+5 ms)Region opened successfully at 1733848476963 2024-12-10T16:34:36,965 INFO [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., pid=6, masterSystemTime=1733848476940 2024-12-10T16:34:36,968 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:36,968 INFO [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:36,969 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3c4a0fab46c2c0263b38e45209989070, regionState=OPEN, openSeqNum=2, regionLocation=4b7737f37de9,35753,1733848461832 2024-12-10T16:34:36,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3c4a0fab46c2c0263b38e45209989070, server=4b7737f37de9,35753,1733848461832 because future has completed 2024-12-10T16:34:36,977 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-10T16:34:36,977 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 3c4a0fab46c2c0263b38e45209989070, server=4b7737f37de9,35753,1733848461832 in 187 msec 2024-12-10T16:34:36,980 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-10T16:34:36,980 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, ASSIGN in 355 msec 2024-12-10T16:34:36,982 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T16:34:36,982 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733848476982"}]},"ts":"1733848476982"} 2024-12-10T16:34:36,985 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLED in hbase:meta 2024-12-10T16:34:36,986 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T16:34:36,989 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF in 420 msec 2024-12-10T16:34:37,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42829 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T16:34:37,209 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: 
default:testReplayEditsAfterRegionMovedWithMultiCF completed 2024-12-10T16:34:37,209 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testReplayEditsAfterRegionMovedWithMultiCF get assigned. Timeout = 60000ms 2024-12-10T16:34:37,210 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T16:34:37,217 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned to meta. Checking AM states. 2024-12-10T16:34:37,218 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T16:34:37,219 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned. 2024-12-10T16:34:37,234 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,35753,1733848461832, seqNum=2] 2024-12-10T16:34:37,236 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T16:34:37,238 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54760, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T16:34:37,251 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42829 {}] master.HMaster(2410): Client=jenkins//172.17.0.3 move hri=3c4a0fab46c2c0263b38e45209989070, source=4b7737f37de9,35753,1733848461832, destination=4b7737f37de9,40043,1733848461924, warming up region on 4b7737f37de9,40043,1733848461924 2024-12-10T16:34:37,253 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42829 {}] master.HMaster(2414): Client=jenkins//172.17.0.3 move hri=3c4a0fab46c2c0263b38e45209989070, source=4b7737f37de9,35753,1733848461832, destination=4b7737f37de9,40043,1733848461924, running balancer 2024-12-10T16:34:37,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42829 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, REOPEN/MOVE 2024-12-10T16:34:37,255 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, REOPEN/MOVE 2024-12-10T16:34:37,258 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=3c4a0fab46c2c0263b38e45209989070, regionState=CLOSING, regionLocation=4b7737f37de9,35753,1733848461832 2024-12-10T16:34:37,258 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40043 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 
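The entries above show the client locating row 'r1' on 4b7737f37de9,35753 and the master then handling a request to move region 3c4a0fab46c2c0263b38e45209989070 to 4b7737f37de9,40043. A hedged sketch of the admin call such a move request corresponds to (identifiers copied from the log; this is not the test's actual code):

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MoveRegionSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          // Encoded region name and destination server copied from the log entries above.
          byte[] encodedRegionName = Bytes.toBytes("3c4a0fab46c2c0263b38e45209989070");
          ServerName dest = ServerName.valueOf("4b7737f37de9,40043,1733848461924");
          // Asks the master to unassign the region and reopen it on dest; the actual
          // work is the REOPEN/MOVE TransitRegionStateProcedure logged next.
          admin.move(encodedRegionName, dest);
        }
      }
    }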
2024-12-10T16:34:37,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40043 {}] regionserver.HRegion(7855): Warmup {ENCODED => 3c4a0fab46c2c0263b38e45209989070, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:37,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40043 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:37,259 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:37,261 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, REOPEN/MOVE because future has completed 2024-12-10T16:34:37,261 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c4a0fab46c2c0263b38e45209989070 columnFamilyName cf1 2024-12-10T16:34:37,261 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:37,261 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T16:34:37,262 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(327): Store=3c4a0fab46c2c0263b38e45209989070/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:37,262 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3c4a0fab46c2c0263b38e45209989070, server=4b7737f37de9,35753,1733848461832}] 2024-12-10T16:34:37,262 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:37,263 INFO 
[StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c4a0fab46c2c0263b38e45209989070 columnFamilyName cf2 2024-12-10T16:34:37,263 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:37,264 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(327): Store=3c4a0fab46c2c0263b38e45209989070/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:37,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40043 {}] regionserver.HRegion(1722): Closing 3c4a0fab46c2c0263b38e45209989070, disabling compactions & flushes 2024-12-10T16:34:37,264 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40043 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:37,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40043 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:37,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40043 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. after waiting 0 ms 2024-12-10T16:34:37,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40043 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:37,265 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40043 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 
2024-12-10T16:34:37,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40043 {}] regionserver.HRegion(1676): Region close journal for 3c4a0fab46c2c0263b38e45209989070: Waiting for close lock at 1733848477264Disabling compacts and flushes for region at 1733848477264Disabling writes for close at 1733848477264Writing region close event to WAL at 1733848477265 (+1 ms)Closed at 1733848477265 2024-12-10T16:34:37,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42829 {}] procedure.ProcedureSyncWait(219): waitFor pid=7 2024-12-10T16:34:37,421 INFO [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(122): Close 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:37,421 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-10T16:34:37,422 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1722): Closing 3c4a0fab46c2c0263b38e45209989070, disabling compactions & flushes 2024-12-10T16:34:37,422 INFO [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:37,422 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:37,422 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. after waiting 0 ms 2024-12-10T16:34:37,422 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 
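[Editor's note: the REOPEN/MOVE procedure (pid=7) above is the master-side trace of a region move. A minimal client-side sketch of the call that produces this sequence is below; the table, encoded region name and destination server mirror the log, while the class name, configuration and connection scaffolding are illustrative only and not the test's actual code.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class MoveRegionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
      // The table has a single region; in the log its encoded name is
      // 3c4a0fab46c2c0263b38e45209989070.
      RegionInfo region = admin.getRegions(table).get(0);
      // Destination region server as reported in the log (host, port, startcode).
      ServerName dest = ServerName.valueOf("4b7737f37de9", 40043, 1733848461924L);
      // Asks the master to move the region: it warms the region up on the
      // destination, closes it on the source, then reopens it on the destination,
      // which is the warmup/close/open sequence logged above.
      admin.move(region.getEncodedNameAsBytes(), dest);
    }
  }
}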
2024-12-10T16:34:37,422 INFO [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(2902): Flushing 3c4a0fab46c2c0263b38e45209989070 2/2 column families, dataSize=31 B heapSize=616 B 2024-12-10T16:34:37,438 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/.tmp/cf1/6e5d60010e654178aac0538018bcd279 is 35, key is r1/cf1:q/1733848477239/Put/seqid=0 2024-12-10T16:34:37,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741887_1065 (size=4783) 2024-12-10T16:34:37,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741887_1065 (size=4783) 2024-12-10T16:34:37,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741887_1065 (size=4783) 2024-12-10T16:34:37,446 INFO [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/.tmp/cf1/6e5d60010e654178aac0538018bcd279 2024-12-10T16:34:37,453 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/.tmp/cf1/6e5d60010e654178aac0538018bcd279 as hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf1/6e5d60010e654178aac0538018bcd279 2024-12-10T16:34:37,460 INFO [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf1/6e5d60010e654178aac0538018bcd279, entries=1, sequenceid=5, filesize=4.7 K 2024-12-10T16:34:37,461 INFO [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 3c4a0fab46c2c0263b38e45209989070 in 39ms, sequenceid=5, compaction requested=false 2024-12-10T16:34:37,461 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testReplayEditsAfterRegionMovedWithMultiCF' 2024-12-10T16:34:37,467 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/recovered.edits/8.seqid, 
newMaxSeqId=8, maxSeqId=1 2024-12-10T16:34:37,469 INFO [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:37,470 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1676): Region close journal for 3c4a0fab46c2c0263b38e45209989070: Waiting for close lock at 1733848477422Running coprocessor pre-close hooks at 1733848477422Disabling compacts and flushes for region at 1733848477422Disabling writes for close at 1733848477422Obtaining lock to block concurrent updates at 1733848477422Preparing flush snapshotting stores in 3c4a0fab46c2c0263b38e45209989070 at 1733848477422Finished memstore snapshotting testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., syncing WAL and waiting on mvcc, flushsize=dataSize=31, getHeapSize=584, getOffHeapSize=0, getCellsCount=1 at 1733848477422Flushing stores of testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. at 1733848477423 (+1 ms)Flushing 3c4a0fab46c2c0263b38e45209989070/cf1: creating writer at 1733848477423Flushing 3c4a0fab46c2c0263b38e45209989070/cf1: appending metadata at 1733848477438 (+15 ms)Flushing 3c4a0fab46c2c0263b38e45209989070/cf1: closing flushed file at 1733848477438Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@780b5418: reopening flushed file at 1733848477452 (+14 ms)Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 3c4a0fab46c2c0263b38e45209989070 in 39ms, sequenceid=5, compaction requested=false at 1733848477461 (+9 ms)Writing region close event to WAL at 1733848477463 (+2 ms)Running coprocessor post-close hooks at 1733848477468 (+5 ms)Closed at 1733848477469 (+1 ms) 2024-12-10T16:34:37,470 INFO [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionServer(3302): Adding 3c4a0fab46c2c0263b38e45209989070 move to 4b7737f37de9,40043,1733848461924 record at close sequenceid=5 2024-12-10T16:34:37,473 INFO [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(157): Closed 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:37,474 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=3c4a0fab46c2c0263b38e45209989070, regionState=CLOSED 2024-12-10T16:34:37,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3c4a0fab46c2c0263b38e45209989070, server=4b7737f37de9,35753,1733848461832 because future has completed 2024-12-10T16:34:37,482 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-10T16:34:37,483 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; CloseRegionProcedure 3c4a0fab46c2c0263b38e45209989070, server=4b7737f37de9,35753,1733848461832 in 217 msec 2024-12-10T16:34:37,484 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, REOPEN/MOVE; state=CLOSED, 
location=4b7737f37de9,40043,1733848461924; forceNewPlan=false, retain=false 2024-12-10T16:34:37,634 INFO [4b7737f37de9:42829 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-10T16:34:37,634 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=3c4a0fab46c2c0263b38e45209989070, regionState=OPENING, regionLocation=4b7737f37de9,40043,1733848461924 2024-12-10T16:34:37,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, REOPEN/MOVE because future has completed 2024-12-10T16:34:37,639 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3c4a0fab46c2c0263b38e45209989070, server=4b7737f37de9,40043,1733848461924}] 2024-12-10T16:34:37,797 INFO [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:37,797 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7752): Opening region: {ENCODED => 3c4a0fab46c2c0263b38e45209989070, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:37,798 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:37,798 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:37,798 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7794): checking encryption for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:37,798 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7797): checking classloading for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:37,800 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:37,801 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c4a0fab46c2c0263b38e45209989070 columnFamilyName cf1 2024-12-10T16:34:37,801 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:37,810 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf1/6e5d60010e654178aac0538018bcd279 2024-12-10T16:34:37,810 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(327): Store=3c4a0fab46c2c0263b38e45209989070/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:37,810 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:37,811 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c4a0fab46c2c0263b38e45209989070 columnFamilyName cf2 2024-12-10T16:34:37,812 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:37,812 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(327): Store=3c4a0fab46c2c0263b38e45209989070/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:37,812 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1038): replaying wal for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:37,813 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:37,815 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 
recovered edits file(s) under hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:37,816 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1048): stopping wal replay for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:37,816 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1060): Cleaning up temporary data for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:37,817 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-10T16:34:37,818 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1093): writing seq id for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:37,819 INFO [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1114): Opened 3c4a0fab46c2c0263b38e45209989070; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60317889, jitterRate=-0.10119341313838959}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-10T16:34:37,819 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:37,820 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1006): Region open journal for 3c4a0fab46c2c0263b38e45209989070: Running coprocessor pre-open hook at 1733848477798Writing region info on filesystem at 1733848477798Initializing all the Stores at 1733848477799 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848477799Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848477800 (+1 ms)Cleaning up temporary data from old regions at 1733848477816 (+16 ms)Running coprocessor post-open hooks at 1733848477819 (+3 ms)Region opened successfully at 1733848477820 (+1 ms) 2024-12-10T16:34:37,822 INFO [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., pid=9, masterSystemTime=1733848477792 2024-12-10T16:34:37,825 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2266): Finished post open deploy 
task for testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:37,825 INFO [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:37,826 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=3c4a0fab46c2c0263b38e45209989070, regionState=OPEN, openSeqNum=9, regionLocation=4b7737f37de9,40043,1733848461924 2024-12-10T16:34:37,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3c4a0fab46c2c0263b38e45209989070, server=4b7737f37de9,40043,1733848461924 because future has completed 2024-12-10T16:34:37,833 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-12-10T16:34:37,834 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; OpenRegionProcedure 3c4a0fab46c2c0263b38e45209989070, server=4b7737f37de9,40043,1733848461924 in 192 msec 2024-12-10T16:34:37,836 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, REOPEN/MOVE in 581 msec 2024-12-10T16:34:37,861 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T16:34:37,863 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60924, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T16:34:37,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35753 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 139 connection: 172.17.0.3:54760 deadline: 1733848537868, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=4b7737f37de9 port=40043 startCode=1733848461924. As of locationSeqNum=5. 2024-12-10T16:34:37,876 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,35753,1733848461832, seqNum=2 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,35753,1733848461832, seqNum=2, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=4b7737f37de9 port=40043 startCode=1733848461924. As of locationSeqNum=5. 2024-12-10T16:34:37,877 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,35753,1733848461832, seqNum=2 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=4b7737f37de9 port=40043 startCode=1733848461924. As of locationSeqNum=5. 
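[Editor's note: the RegionMovedException above is handled inside the client, not by the test. A minimal sketch of the kind of write that triggers it is below; the table, row "r1" and family "cf1" mirror the log, while the qualifier/value and surrounding scaffolding are assumptions for illustration.]

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAfterMoveSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(
             TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))) {
      Put put = new Put(Bytes.toBytes("r1"));
      put.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      // The cached location (seqNum=2) points at the old server, so the first
      // attempt fails with RegionMovedException; the client refreshes its
      // location cache to 4b7737f37de9,40043 (locationSeqNum=5) and retries,
      // so under normal retry settings the caller never sees the exception.
      table.put(put);
    }
  }
}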
2024-12-10T16:34:37,877 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,35753,1733848461832, seqNum=2 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,40043,1733848461924, seqNum=5 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=4b7737f37de9 port=40043 startCode=1733848461924. As of locationSeqNum=5. 2024-12-10T16:34:37,999 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 3c4a0fab46c2c0263b38e45209989070 2/2 column families, dataSize=50 B heapSize=720 B 2024-12-10T16:34:38,016 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/.tmp/cf1/87b352890c824fef95acb728208365f9 is 29, key is r1/cf1:/1733848477989/DeleteFamily/seqid=0 2024-12-10T16:34:38,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741888_1066 (size=4906) 2024-12-10T16:34:38,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741888_1066 (size=4906) 2024-12-10T16:34:38,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741888_1066 (size=4906) 2024-12-10T16:34:38,024 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), to=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/.tmp/cf1/87b352890c824fef95acb728208365f9 2024-12-10T16:34:38,030 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 87b352890c824fef95acb728208365f9 2024-12-10T16:34:38,044 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/.tmp/cf2/382b7acb180a417d95cbbad17be9caeb is 29, key is r1/cf2:/1733848477989/DeleteFamily/seqid=0 2024-12-10T16:34:38,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741889_1067 (size=4906) 2024-12-10T16:34:38,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741889_1067 (size=4906) 2024-12-10T16:34:38,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741889_1067 (size=4906) 2024-12-10T16:34:38,051 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), to=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/.tmp/cf2/382b7acb180a417d95cbbad17be9caeb 
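[Editor's note: the flush above writes one DeleteFamily cell per family (keys r1/cf1:/... and r1/cf2:/...) into .tmp HFiles. That pattern corresponds to a whole-family Delete followed by an explicit flush, sketched below; table, row and family names mirror the log, the rest is illustrative scaffolding rather than the test's code.]

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteFamilyFlushSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      Delete delete = new Delete(Bytes.toBytes("r1"));
      delete.addFamily(Bytes.toBytes("cf1"));   // emits a DeleteFamily marker for cf1
      delete.addFamily(Bytes.toBytes("cf2"));   // and one for cf2
      table.delete(delete);
      // Force the memstore to disk: each family is written to a .tmp HFile and
      // then committed into the cf1/ and cf2/ store directories, as logged.
      admin.flush(tn);
    }
  }
}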
2024-12-10T16:34:38,057 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 382b7acb180a417d95cbbad17be9caeb 2024-12-10T16:34:38,058 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/.tmp/cf1/87b352890c824fef95acb728208365f9 as hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf1/87b352890c824fef95acb728208365f9 2024-12-10T16:34:38,065 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 87b352890c824fef95acb728208365f9 2024-12-10T16:34:38,065 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf1/87b352890c824fef95acb728208365f9, entries=1, sequenceid=12, filesize=4.8 K 2024-12-10T16:34:38,066 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/.tmp/cf2/382b7acb180a417d95cbbad17be9caeb as hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf2/382b7acb180a417d95cbbad17be9caeb 2024-12-10T16:34:38,072 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 382b7acb180a417d95cbbad17be9caeb 2024-12-10T16:34:38,072 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf2/382b7acb180a417d95cbbad17be9caeb, entries=1, sequenceid=12, filesize=4.8 K 2024-12-10T16:34:38,074 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~50 B/50, heapSize ~688 B/688, currentSize=0 B/0 for 3c4a0fab46c2c0263b38e45209989070 in 75ms, sequenceid=12, compaction requested=false 2024-12-10T16:34:38,074 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 3c4a0fab46c2c0263b38e45209989070: 2024-12-10T16:34:38,076 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-10T16:34:38,077 DEBUG [Time-limited test {}] regionserver.HStore(1541): 3c4a0fab46c2c0263b38e45209989070/cf1 is initiating major compaction (all files) 2024-12-10T16:34:38,077 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T16:34:38,077 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-10T16:34:38,078 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 3c4a0fab46c2c0263b38e45209989070/cf1 in testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:38,078 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf1/6e5d60010e654178aac0538018bcd279, hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf1/87b352890c824fef95acb728208365f9] into tmpdir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/.tmp, totalSize=9.5 K 2024-12-10T16:34:38,079 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 6e5d60010e654178aac0538018bcd279, keycount=1, bloomtype=NONE, size=4.7 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733848477239 2024-12-10T16:34:38,080 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 87b352890c824fef95acb728208365f9, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-12-10T16:34:38,091 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 3c4a0fab46c2c0263b38e45209989070#cf1#compaction#16 average throughput is NaN MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T16:34:38,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741890_1068 (size=4626) 2024-12-10T16:34:38,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741890_1068 (size=4626) 2024-12-10T16:34:38,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741890_1068 (size=4626) 2024-12-10T16:34:38,106 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/.tmp/cf1/452cb5bb11a747e58753c9152983a4f8 as hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf1/452cb5bb11a747e58753c9152983a4f8 2024-12-10T16:34:38,120 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 2 (all) file(s) in 3c4a0fab46c2c0263b38e45209989070/cf1 of 3c4a0fab46c2c0263b38e45209989070 into 452cb5bb11a747e58753c9152983a4f8(size=4.5 K), total size for store is 4.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
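[Editor's note: the "major compaction (all files)" runs above (cf1 first, then cf2) can be requested per column family through the Admin API, sketched below. The log shows the test driving the compaction from inside the mini cluster, so this is only an equivalent client-side view; names mirror the log, the rest is illustrative. majorCompact() is asynchronous, so a caller would normally poll getCompactionState() or the store file count afterwards.]

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class MajorCompactSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Rewrites cf1's two store files into a single file (452cb5bb... above);
      // the major compaction lets the Put shadowed by the DeleteFamily marker
      // be dropped, which is why the result is smaller than either input.
      admin.majorCompact(tn, Bytes.toBytes("cf1"));
      admin.majorCompact(tn, Bytes.toBytes("cf2"));
    }
  }
}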
2024-12-10T16:34:38,120 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 3c4a0fab46c2c0263b38e45209989070: 2024-12-10T16:34:38,120 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-10T16:34:38,120 DEBUG [Time-limited test {}] regionserver.HStore(1541): 3c4a0fab46c2c0263b38e45209989070/cf2 is initiating major compaction (all files) 2024-12-10T16:34:38,120 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T16:34:38,120 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T16:34:38,120 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 3c4a0fab46c2c0263b38e45209989070/cf2 in testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:38,120 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf2/382b7acb180a417d95cbbad17be9caeb] into tmpdir=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/.tmp, totalSize=4.8 K 2024-12-10T16:34:38,121 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 382b7acb180a417d95cbbad17be9caeb, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-12-10T16:34:38,127 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 3c4a0fab46c2c0263b38e45209989070#cf2#compaction#17 average throughput is NaN MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T16:34:38,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741891_1069 (size=4592) 2024-12-10T16:34:38,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741891_1069 (size=4592) 2024-12-10T16:34:38,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741891_1069 (size=4592) 2024-12-10T16:34:38,142 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/.tmp/cf2/5073f024fb8947f1ba78b2876d1b4660 as hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf2/5073f024fb8947f1ba78b2876d1b4660 2024-12-10T16:34:38,150 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 1 (all) file(s) in 3c4a0fab46c2c0263b38e45209989070/cf2 of 3c4a0fab46c2c0263b38e45209989070 into 5073f024fb8947f1ba78b2876d1b4660(size=4.5 K), total size for store is 4.5 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T16:34:38,150 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 3c4a0fab46c2c0263b38e45209989070: 2024-12-10T16:34:38,155 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42829 {}] master.HMaster(2410): Client=jenkins//172.17.0.3 move hri=3c4a0fab46c2c0263b38e45209989070, source=4b7737f37de9,40043,1733848461924, destination=4b7737f37de9,35753,1733848461832, warming up region on 4b7737f37de9,35753,1733848461832 2024-12-10T16:34:38,155 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42829 {}] master.HMaster(2414): Client=jenkins//172.17.0.3 move hri=3c4a0fab46c2c0263b38e45209989070, source=4b7737f37de9,40043,1733848461924, destination=4b7737f37de9,35753,1733848461832, running balancer 2024-12-10T16:34:38,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42829 {}] procedure2.ProcedureExecutor(1139): Stored pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, REOPEN/MOVE 2024-12-10T16:34:38,157 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, REOPEN/MOVE 2024-12-10T16:34:38,158 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=3c4a0fab46c2c0263b38e45209989070, regionState=CLOSING, regionLocation=4b7737f37de9,40043,1733848461924 2024-12-10T16:34:38,159 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35753 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 
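[Editor's note: at this point the region sits on 4b7737f37de9,40043 and a second move back to 4b7737f37de9,35753 (pid=10) is being requested. A minimal sketch of how a client can confirm the current assignment, bypassing its location cache, is below; the table and row mirror the log, everything else is illustrative.]

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLocationSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(tn)) {
      // reload=true forces a fresh hbase:meta lookup instead of using the
      // cached location, which may still point at the pre-move server.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("r1"), true);
      System.out.println("region " + loc.getRegion().getEncodedName()
          + " is on " + loc.getServerName());
    }
  }
}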
2024-12-10T16:34:38,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35753 {}] regionserver.HRegion(7855): Warmup {ENCODED => 3c4a0fab46c2c0263b38e45209989070, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:38,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35753 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:38,159 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:38,160 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c4a0fab46c2c0263b38e45209989070 columnFamilyName cf1 2024-12-10T16:34:38,161 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:38,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, REOPEN/MOVE because future has completed 2024-12-10T16:34:38,162 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T16:34:38,162 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3c4a0fab46c2c0263b38e45209989070, server=4b7737f37de9,40043,1733848461924}] 2024-12-10T16:34:38,169 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf1/452cb5bb11a747e58753c9152983a4f8 2024-12-10T16:34:38,174 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf1/6e5d60010e654178aac0538018bcd279 2024-12-10T16:34:38,180 INFO 
[StoreFileOpener-3c4a0fab46c2c0263b38e45209989070-cf1-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 87b352890c824fef95acb728208365f9 2024-12-10T16:34:38,180 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf1/87b352890c824fef95acb728208365f9 2024-12-10T16:34:38,180 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(327): Store=3c4a0fab46c2c0263b38e45209989070/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:38,180 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:38,181 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c4a0fab46c2c0263b38e45209989070 columnFamilyName cf2 2024-12-10T16:34:38,182 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:38,189 INFO [StoreFileOpener-3c4a0fab46c2c0263b38e45209989070-cf2-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 382b7acb180a417d95cbbad17be9caeb 2024-12-10T16:34:38,189 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf2/382b7acb180a417d95cbbad17be9caeb 2024-12-10T16:34:38,194 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf2/5073f024fb8947f1ba78b2876d1b4660 2024-12-10T16:34:38,194 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(327): Store=3c4a0fab46c2c0263b38e45209989070/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:38,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35753 {}] regionserver.HRegion(1722): Closing 3c4a0fab46c2c0263b38e45209989070, disabling 
compactions & flushes 2024-12-10T16:34:38,195 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35753 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:38,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35753 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:38,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35753 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. after waiting 0 ms 2024-12-10T16:34:38,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35753 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:38,196 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35753 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:38,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35753 {}] regionserver.HRegion(1676): Region close journal for 3c4a0fab46c2c0263b38e45209989070: Waiting for close lock at 1733848478195Disabling compacts and flushes for region at 1733848478195Disabling writes for close at 1733848478195Writing region close event to WAL at 1733848478196 (+1 ms)Closed at 1733848478196 2024-12-10T16:34:38,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42829 {}] procedure.ProcedureSyncWait(219): waitFor pid=10 2024-12-10T16:34:38,318 INFO [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(122): Close 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:38,318 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-10T16:34:38,318 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1722): Closing 3c4a0fab46c2c0263b38e45209989070, disabling compactions & flushes 2024-12-10T16:34:38,319 INFO [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:38,319 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:38,319 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. after waiting 0 ms 2024-12-10T16:34:38,319 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 
2024-12-10T16:34:38,321 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf1/6e5d60010e654178aac0538018bcd279, hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf1/87b352890c824fef95acb728208365f9] to archive 2024-12-10T16:34:38,325 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T16:34:38,329 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf1/87b352890c824fef95acb728208365f9 to hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf1/87b352890c824fef95acb728208365f9 2024-12-10T16:34:38,330 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf1/6e5d60010e654178aac0538018bcd279 to hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf1/6e5d60010e654178aac0538018bcd279 2024-12-10T16:34:38,342 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf2/382b7acb180a417d95cbbad17be9caeb] to archive 2024-12-10T16:34:38,343 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
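[Editor's note: the archiving above keeps each compacted-away store file's path relative to the data root and re-roots it under <rootdir>/archive. The helper below only mirrors that layout for illustration; it is not the HFileArchiver API, and the class and method names are assumptions.]

import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  /** Re-roots a store file path from <rootdir>/data/... to <rootdir>/archive/data/... */
  static Path toArchivePath(Path rootDir, Path storeFile) {
    // e.g. relative = data/default/<table>/<region>/<cf>/<file>
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1);
    return new Path(new Path(rootDir, "archive"), relative);
  }

  public static void main(String[] args) {
    Path root = new Path(
        "hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f");
    Path storeFile = new Path(root,
        "data/default/testReplayEditsAfterRegionMovedWithMultiCF/"
        + "3c4a0fab46c2c0263b38e45209989070/cf1/6e5d60010e654178aac0538018bcd279");
    // Prints the archive location matching the HFileArchiver lines in the log.
    System.out.println(toArchivePath(root, storeFile));
  }
}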
2024-12-10T16:34:38,345 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf2/382b7acb180a417d95cbbad17be9caeb to hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf2/382b7acb180a417d95cbbad17be9caeb 2024-12-10T16:34:38,350 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=8 2024-12-10T16:34:38,351 INFO [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:38,351 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1676): Region close journal for 3c4a0fab46c2c0263b38e45209989070: Waiting for close lock at 1733848478318Running coprocessor pre-close hooks at 1733848478318Disabling compacts and flushes for region at 1733848478318Disabling writes for close at 1733848478319 (+1 ms)Writing region close event to WAL at 1733848478346 (+27 ms)Running coprocessor post-close hooks at 1733848478351 (+5 ms)Closed at 1733848478351 2024-12-10T16:34:38,351 INFO [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegionServer(3302): Adding 3c4a0fab46c2c0263b38e45209989070 move to 4b7737f37de9,35753,1733848461832 record at close sequenceid=12 2024-12-10T16:34:38,353 INFO [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(157): Closed 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:38,354 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=3c4a0fab46c2c0263b38e45209989070, regionState=CLOSED 2024-12-10T16:34:38,356 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3c4a0fab46c2c0263b38e45209989070, server=4b7737f37de9,40043,1733848461924 because future has completed 2024-12-10T16:34:38,360 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=10 2024-12-10T16:34:38,360 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=10, state=SUCCESS, hasLock=false; CloseRegionProcedure 3c4a0fab46c2c0263b38e45209989070, server=4b7737f37de9,40043,1733848461924 in 195 msec 2024-12-10T16:34:38,361 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, REOPEN/MOVE; state=CLOSED, location=4b7737f37de9,35753,1733848461832; forceNewPlan=false, retain=false 2024-12-10T16:34:38,512 INFO [4b7737f37de9:42829 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 
1 retained the pre-restart assignment. 2024-12-10T16:34:38,513 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=3c4a0fab46c2c0263b38e45209989070, regionState=OPENING, regionLocation=4b7737f37de9,35753,1733848461832 2024-12-10T16:34:38,522 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, REOPEN/MOVE because future has completed 2024-12-10T16:34:38,522 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3c4a0fab46c2c0263b38e45209989070, server=4b7737f37de9,35753,1733848461832}] 2024-12-10T16:34:38,685 INFO [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:38,686 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 3c4a0fab46c2c0263b38e45209989070, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:38,686 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:38,687 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:38,687 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:38,687 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:38,690 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:38,691 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c4a0fab46c2c0263b38e45209989070 columnFamilyName cf1 2024-12-10T16:34:38,691 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:38,701 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf1/452cb5bb11a747e58753c9152983a4f8 2024-12-10T16:34:38,701 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(327): Store=3c4a0fab46c2c0263b38e45209989070/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:38,702 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:38,703 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c4a0fab46c2c0263b38e45209989070 columnFamilyName cf2 2024-12-10T16:34:38,703 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:38,708 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf2/5073f024fb8947f1ba78b2876d1b4660 2024-12-10T16:34:38,708 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(327): Store=3c4a0fab46c2c0263b38e45209989070/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:38,709 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:38,710 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:38,711 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:38,711 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:38,712 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:38,712 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-10T16:34:38,714 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:38,715 INFO [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 3c4a0fab46c2c0263b38e45209989070; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65418795, jitterRate=-0.025183990597724915}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-10T16:34:38,715 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:38,716 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 3c4a0fab46c2c0263b38e45209989070: Running coprocessor pre-open hook at 1733848478688Writing region info on filesystem at 1733848478688Initializing all the Stores at 1733848478689 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848478689Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848478690 (+1 ms)Cleaning up temporary data from old regions at 1733848478712 (+22 ms)Running coprocessor post-open hooks at 1733848478715 (+3 ms)Region opened successfully at 1733848478716 (+1 ms) 2024-12-10T16:34:38,717 INFO [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] 
regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., pid=12, masterSystemTime=1733848478675 2024-12-10T16:34:38,720 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:38,720 INFO [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:38,721 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=3c4a0fab46c2c0263b38e45209989070, regionState=OPEN, openSeqNum=18, regionLocation=4b7737f37de9,35753,1733848461832 2024-12-10T16:34:38,723 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3c4a0fab46c2c0263b38e45209989070, server=4b7737f37de9,35753,1733848461832 because future has completed 2024-12-10T16:34:38,726 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-12-10T16:34:38,726 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 3c4a0fab46c2c0263b38e45209989070, server=4b7737f37de9,35753,1733848461832 in 202 msec 2024-12-10T16:34:38,728 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, REOPEN/MOVE in 571 msec 2024-12-10T16:34:38,759 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T16:34:38,761 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54772, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T16:34:38,763 ERROR [Time-limited test {}] regionserver.HRegionServer(2442): ***** ABORTING region server 4b7737f37de9,35753,1733848461832: testing ***** 2024-12-10T16:34:38,764 ERROR [Time-limited test {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-12-10T16:34:38,766 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-12-10T16:34:38,768 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-12-10T16:34:38,770 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-12-10T16:34:38,771 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-12-10T16:34:38,779 INFO [Time-limited test {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 326739392 }, 
"NonHeapMemoryUsage": { "committed": 171573248, "init": 7667712, "max": -1, "used": 168928456 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=IPC", "modelerType": "RegionServer,sub=IPC", "tag.Context": "regionserver", "tag.Hostname": "4b7737f37de9", "queueSize": 0, "numCallsInGeneralQueue": 0, "numCallsInReplicationQueue": 0, "numCallsInBulkLoadQueue": 0, "numCallsInPriorityQueue": 0, "numCallsInMetaPriorityQueue": 0, "numOpenConnections": 0, "numActiveHandler": 0, "numActiveGeneralHandler": 0, "numActivePriorityHandler": 0, "numActiveReplicationHandler": 0, "numGeneralCallsDropped": 0, "numLifoModeSwitches": 0, "numCallsInWriteQueue": 0, "numActiveBulkLoadHandler": 0, "numCallsInReadQueue": 0, "numCallsInScanQueue": 0, "numActiveWriteHandler": 0, "numActiveReadHandler": 0, "numActiveScanHandler": 0, "nettyDirectMemoryUsage": 67108864, "nettyTotalPendingOutboundBytes": 0, "nettyMaxPendingOutboundBytes": 0, "receivedBytes": 2071, "exceptions.RegionMovedException": 0, "authenticationSuccesses": 0, "authorizationFailures": 0, "exceptions.requestTooBig": 0, "UnwritableTime_num_ops": 0, "UnwritableTime_min": 0, "UnwritableTime_max": 0, "UnwritableTime_mean": 0, "UnwritableTime_25th_percentile": 0, "UnwritableTime_median": 0, "UnwritableTime_75th_percentile": 0, "UnwritableTime_90th_percentile": 0, "UnwritableTime_95th_percentile": 0, "UnwritableTime_98th_percentile": 0, "UnwritableTime_99th_percentile": 0, "UnwritableTime_99.9th_percentile": 0, "exceptions.OutOfOrderScannerNextException": 0, "exceptions.rpcThrottling": 0, "exceptions.otherExceptions": 0, "ProcessCallTime_num_ops": 8, "ProcessCallTime_min": 0, "ProcessCallTime_max": 9, "ProcessCallTime_mean": 3, "ProcessCallTime_25th_percentile": 2, "ProcessCallTime_median": 4, "ProcessCallTime_75th_percentile": 6, "ProcessCallTime_90th_percentile": 8, "ProcessCallTime_95th_percentile": 8, "ProcessCallTime_98th_percentile": 8, "ProcessCallTime_99th_percentile": 8, "ProcessCallTime_99.9th_percentile": 8, "ProcessCallTime_TimeRangeCount_0-1": 8, "exceptions.callQueueTooBig": 0, "QueueCallTime_num_ops": 8, "QueueCallTime_min": 0, "QueueCallTime_max": 1, "QueueCallTime_mean": 0, "QueueCallTime_25th_percentile": 0, "QueueCallTime_median": 0, "QueueCallTime_75th_percentile": 0, "QueueCallTime_90th_percentile": 0, "QueueCallTime_95th_percentile": 0, "QueueCallTime_98th_percentile": 0, "QueueCallTime_99th_percentile": 0, "QueueCallTime_99.9th_percentile": 0, "QueueCallTime_TimeRangeCount_0-1": 8, "authenticationFailures": 0, "exceptions.multiResponseTooLarge": 0, "exceptions.callDropped": 0, "TotalCallTime_num_ops": 8, "TotalCallTime_min": 1, "TotalCallTime_max": 9, "TotalCallTime_mean": 4, "TotalCallTime_25th_percentile": 3, "TotalCallTime_median": 5, "TotalCallTime_75th_percentile": 7, "TotalCallTime_90th_percentile": 8, "TotalCallTime_95th_percentile": 8, "TotalCallTime_98th_percentile": 8, "TotalCallTime_99th_percentile": 8, "TotalCallTime_99.9th_percentile": 8, "TotalCallTime_TimeRangeCount_0-1": 8, "exceptions.RegionTooBusyException": 0, "exceptions.FailedSanityCheckException": 0, "ResponseSize_num_ops": 8, "ResponseSize_min": 0, "ResponseSize_max": 174, "ResponseSize_mean": 50, "ResponseSize_25th_percentile": 43, "ResponseSize_median": 87, "ResponseSize_75th_percentile": 130, "ResponseSize_90th_percentile": 156, "ResponseSize_95th_percentile": 165, "ResponseSize_98th_percentile": 170, "ResponseSize_99th_percentile": 172, "ResponseSize_99.9th_percentile": 173, 
"ResponseSize_SizeRangeCount_0-10": 8, "exceptions.UnknownScannerException": 0, "exceptions": 0, "maxOutboundBytesExceeded": 0, "authenticationFallbacks": 0, "exceptions.quotaExceeded": 0, "exceptions.callTimedOut": 0, "exceptions.NotServingRegionException": 0, "authorizationSuccesses": 0, "exceptions.ScannerResetException": 0, "RequestSize_num_ops": 8, "RequestSize_min": 89, "RequestSize_max": 121, "RequestSize_mean": 103, "RequestSize_25th_percentile": 97, "RequestSize_median": 105, "RequestSize_75th_percentile": 113, "RequestSize_90th_percentile": 117, "RequestSize_95th_percentile": 119, "RequestSize_98th_percentile": 120, "RequestSize_99th_percentile": 120, "RequestSize_99.9th_percentile": 120, "RequestSize_SizeRangeCount_0-10": 8, "sentBytes": 348 } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Replication", "modelerType": "RegionServer,sub=Replication", "tag.Context": "regionserver", "tag.Hostname": "4b7737f37de9", "source.shippedHFiles": 0, "Source.ageOfLastShippedOp_num_ops": 0, "Source.ageOfLastShippedOp_min": 0, "Source.ageOfLastShippedOp_max": 0, "Source.ageOfLastShippedOp_mean": 0, "Source.ageOfLastShippedOp_25th_percentile": 0, "Source.ageOfLastShippedOp_median": 0, "Source.ageOfLastShippedOp_75th_percentile": 0, "Source.ageOfLastShippedOp_90th_percentile": 0, "Source.ageOfLastShippedOp_95th_percentile": 0, "Source.ageOfLastShippedOp_98th_percentile": 0, "Source.ageOfLastShippedOp_99th_percentile": 0, "Source.ageOfLastShippedOp_99.9th_percentile": 0, "source.uncleanlyClosedLogs": 0, "source.closedLogsWithUnknownFileLength": 0, "source.walReaderEditsBufferUsage": 0, "source.repeatedLogFileBytes": 0, "source.sizeOfHFileRefsQueue": 0, "source.logReadInBytes": 0, "source.completedRecoverQueues": 0, "source.sizeOfLogQueue": 0, "source.restartedLogReading": 0, "source.failedRecoverQueues": 0, "source.ignoredUncleanlyClosedLogContentsInBytes": 0, "Sink.ageOfLastAppliedOp_num_ops": 0, "Sink.ageOfLastAppliedOp_min": 0, "Sink.ageOfLastAppliedOp_max": 0, "Sink.ageOfLastAppliedOp_mean": 0, "Sink.ageOfLastAppliedOp_25th_percentile": 0, "Sink.ageOfLastAppliedOp_median": 0, "Sink.ageOfLastAppliedOp_75th_percentile": 0, "Sink.ageOfLastAppliedOp_90th_percentile": 0, "Sink.ageOfLastAppliedOp_95th_percentile": 0, "Sink.ageOfLastAppliedOp_98th_percentile": 0, "Sink.ageOfLastAppliedOp_99th_percentile": 0, "Sink.ageOfLastAppliedOp_99.9th_percentile": 0, "source.logEditsRead": 0, "source.numInitializing": 0, "source.shippedOps": 0, "sink.appliedHFiles": 0, "source.logEditsFiltered": 0, "source.shippedBytes": 0, "sink.appliedOps": 0, "source.completedLogs": 0, "source.failedBatches": 0, "sink.failedBatches": 0, "source.shippedBatches": 0, "sink.appliedBatches": 0 } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Server", "modelerType": "RegionServer,sub=Server", "tag.zookeeperQuorum": "127.0.0.1:53765", "tag.serverName": "4b7737f37de9,35753,1733848461832", "tag.clusterId": "95428ff9-68ba-4f3a-a203-efee9a6f1c7c", "tag.Context": "regionserver", "tag.Hostname": "4b7737f37de9", "regionCount": 0, "storeCount": 0, "hlogFileCount": 1, "hlogFileSize": 0, "storeFileCount": 0, "maxStoreFileCount": 0, "memStoreSize": 0, "memStoreHeapSize": 0, "memStoreOffHeapSize": 0, "storeFileSize": 0, "storeFileSizeGrowthRate": 0.0, "maxStoreFileAge": 0, "minStoreFileAge": 0, "avgStoreFileAge": 0, "numReferenceFiles": 0, "regionServerStartTime": 1733848461832, "averageRegionSize": 0, "storeFileIndexSize": 0, "staticIndexSize": 0, "staticBloomSize": 0, "bloomFilterRequestsCount": 0, 
"bloomFilterNegativeResultsCount": 0, "bloomFilterEligibleRequestsCount": 0, "mutationsWithoutWALCount": 0, "mutationsWithoutWALSize": 0, "percentFilesLocal": 0.0, "percentFilesLocalSecondaryRegions": 0.0, "totalBytesRead": 10034, "localBytesRead": 10034, "shortCircuitBytesRead": 0, "zeroCopyBytesRead": 0, "splitQueueLength": 0, "compactionQueueLength": 0, "smallCompactionQueueLength": 0, "largeCompactionQueueLength": 0, "flushQueueLength": 0, "blockCacheFreeSize": 922070024, "blockCacheCount": 0, "blockCacheDataBlockCount": 0, "blockCacheSize": 676856, "blockCacheCountHitPercent": 0.0, "blockCacheExpressHitPercent": 0.0, "l1CacheSize": 676856, "l1CacheFreeSize": 922070024, "l1CacheCount": 0, "l1CacheEvictionCount": 0, "l1CacheHitCount": 0, "l1CacheMissCount": 0, "l1CacheHitRatio": 0.0, "l1CacheMissRatio": 0.0, "l2CacheSize": 0, "l2CacheFreeSize": 0, "l2CacheCount": 0, "l2CacheEvictionCount": 0, "l2CacheHitCount": 0, "l2CacheMissCount": 0, "l2CacheHitRatio": 0.0, "l2CacheMissRatio": 0.0, "mobFileCacheCount": 0, "mobFileCacheHitPercent": 0.0, "readRequestRatePerSecond": 0.0, "writeRequestRatePerSecond": 0.0, "ByteBuffAllocatorHeapAllocationBytes": 4782, "ByteBuffAllocatorPoolAllocationBytes": 0, "ByteBuffAllocatorHeapAllocationRatio": 0.0, "ByteBuffAllocatorTotalBufferCount": 186, "ByteBuffAllocatorUsedBufferCount": 0, "activeScanners": 0, "totalRequestCount": 0, "totalRowActionRequestCount": 0, "readRequestCount": 0, "cpRequestCount": 0, "filteredReadRequestCount": 0, "writeRequestCount": 0, "rpcGetRequestCount": 0, "rpcFullScanRequestCount": 0, "rpcScanRequestCount": 0, "rpcMultiRequestCount": 0, "rpcMutateRequestCount": 0, "checkMutateFailedCount": 0, "checkMutatePassedCount": 0, "blockCacheHitCount": 0, "blockCacheHitCountPrimary": 0, "blockCacheHitCachingCount": 0, "blockCacheMissCount": 0, "blockCacheMissCountPrimary": 0, "blockCacheMissCachingCount": 0, "blockCacheEvictionCount": 0, "blockCacheEvictionCountPrimary": 0, "blockCacheFailedInsertionCount": 0, "blockCacheDataMissCount": 0, "blockCacheLeafIndexMissCount": 0, "blockCacheBloomChunkMissCount": 0, "blockCacheMetaMissCount": 0, "blockCacheRootIndexMissCount": 0, "blockCacheIntermediateIndexMissCount": 0, "blockCacheFileInfoMissCount": 0, "blockCacheGeneralBloomMetaMissCount": 0, "blockCacheDeleteFamilyBloomMissCount": 0, "blockCacheTrailerMissCount": 0, "blockCacheDataHitCount": 0, "blockCacheLeafIndexHitCount": 0, "blockCacheBloomChunkHitCount": 0, "blockCacheMetaHitCount": 0, "blockCacheRootIndexHitCount": 0, "blockCacheIntermediateIndexHitCount": 0, "blockCacheFileInfoHitCount": 0, "blockCacheGeneralBloomMetaHitCount": 0, "blockCacheDeleteFamilyBloomHitCount": 0, "blockCacheTrailerHitCount": 0, "updatesBlockedTime": 0, "flushedCellsCount": 0, "compactedCellsCount": 0, "majorCompactedCellsCount": 0, "flushedCellsSize": 0, "compactedCellsSize": 0, "majorCompactedCellsSize": 0, "cellsCountCompactedFromMob": 0, "cellsCountCompactedToMob": 0, "cellsSizeCompactedFromMob": 0, "cellsSizeCompactedToMob": 0, "mobFlushCount": 0, "mobFlushedCellsCount": 0, "mobFlushedCellsSize": 0, "mobScanCellsCount": 0, "mobScanCellsSize": 0, "mobFileCacheAccessCount": 0, "mobFileCacheMissCount": 0, "mobFileCacheEvictedCount": 0, "hedgedReads": 0, "hedgedReadWins": 0, "hedgedReadOpsInCurThread": 0, "blockedRequestCount": 0, "CheckAndMutate_num_ops": 0, "CheckAndMutate_min": 0, "CheckAndMutate_max": 0, "CheckAndMutate_mean": 0, "CheckAndMutate_25th_percentile": 0, "CheckAndMutate_median": 0, "CheckAndMutate_75th_percentile": 0, 
"CheckAndMutate_90th_percentile": 0, "CheckAndMutate_95th_percentile": 0, "CheckAndMutate_98th_percentile": 0, "CheckAndMutate_99th_percentile": 0, "CheckAndMutate_99.9th_percentile": 0, "MajorCompactionTime_num_ops": 0, "MajorCompactionTime_min": 0, "MajorCompactionTime_max": 0, "MajorCompactionTime_mean": 0, "MajorCompactionTime_25th_percentile": 0, "MajorCompactionTime_median": 0, "MajorCompactionTime_75th_percentile": 0, "MajorCompactionTime_90th_percentile": 0, "MajorCompactionTime_95th_percentile": 0, "MajorCompactionTime_98th_percentile": 0, "MajorCompactionTime_99th_percentile": 0, "MajorCompactionTime_99.9th_percentile": 0, "ScanTime_num_ops": 0, "ScanTime_min": 0, "ScanTime_max": 0, "ScanTime_mean": 0, "ScanTime_25th_percentile": 0, "ScanTime_median": 0, "ScanTime_75th_percentile": 0, "ScanTime_90th_percentile": 0, "ScanTime_95th_percentile": 0, "ScanTime_98th_percentile": 0, "ScanTime_99th_percentile": 0, "ScanTime_99.9th_percentile": 0, "CheckAndMutateBlockBytesScanned_num_ops": 0, "CheckAndMutateBlockBytesScanned_min": 0, "CheckAndMutateBlockBytesScanned_max": 0, "CheckAndMutateBlockBytesScanned_mean": 0, "CheckAndMutateBlockBytesScanned_25th_percentile": 0, "CheckAndMutateBlockBytesScanned_median": 0, "CheckAndMutateBlockBytesScanned_75th_percentile": 0, "CheckAndMutateBlockBytesScanned_90th_percentile": 0, "CheckAndMutateBlockBytesScanned_95th_percentile": 0, "CheckAndMutateBlockBytesScanned_98th_percentile": 0, "CheckAndMutateBlockBytesScanned_99th_percentile": 0, "CheckAndMutateBlockBytesScanned_99.9th_percentile": 0, "Put_num_ops": 0, "Put_min": 0, "Put_max": 0, "Put_mean": 0, "Put_25th_percentile": 0, "Put_median": 0, "Put_75th_percentile": 0, "Put_90th_percentile": 0, "Put_95th_percentile": 0, "Put_98th_percentile": 0, "Put_99th_percentile": 0, "Put_99.9th_percentile": 0, "splitRequestCount": 0, "AppendBlockBytesScanned_num_ops": 0, "AppendBlockBytesScanned_min": 0, "AppendBlockBytesScanned_max": 0, "AppendBlockBytesScanned_mean": 0, "AppendBlockBytesScanned_25th_percentile": 0, "AppendBlockBytesScanned_median": 0, "AppendBlockBytesScanned_75th_percentile": 0, "AppendBlockBytesScanned_90th_percentile": 0, "AppendBlockBytesScanned_95th_percentile": 0, "AppendBlockBytesScanned_98th_percentile": 0, "AppendBlockBytesScanned_99th_percentile": 0, "AppendBlockBytesScanned_99.9th_percentile": 0, "PutBatch_num_ops": 0, "PutBatch_min": 0, "PutBatch_max": 0, "PutBatch_mean": 0, "PutBatch_25th_percentile": 0, "PutBatch_median": 0, "PutBatch_75th_percentile": 0, "PutBatch_90th_percentile": 0, "PutBatch_95th_percentile": 0, "PutBatch_98th_percentile": 0, "PutBatch_99th_percentile": 0, "PutBatch_99.9th_percentile": 0, "IncrementBlockBytesScanned_num_ops": 0, "IncrementBlockBytesScanned_min": 0, "IncrementBlockBytesScanned_max": 0, "IncrementBlockBytesScanned_mean": 0, "IncrementBlockBytesScanned_25th_percentile": 0, "IncrementBlockBytesScanned_median": 0, "IncrementBlockBytesScanned_75th_percentile": 0, "IncrementBlockBytesScanned_90th_percentile": 0, "IncrementBlockBytesScanned_95th_percentile": 0, "IncrementBlockBytesScanned_98th_percentile": 0, "IncrementBlockBytesScanned_99th_percentile": 0, "IncrementBlockBytesScanned_99.9th_percentile": 0, "SplitTime_num_ops": 0, "SplitTime_min": 0, "SplitTime_max": 0, "SplitTime_mean": 0, "SplitTime_25th_percentile": 0, "SplitTime_median": 0, "SplitTime_75th_percentile": 0, "SplitTime_90th_percentile": 0, "SplitTime_95th_percentile": 0, "SplitTime_98th_percentile": 0, "SplitTime_99th_percentile": 0, "SplitTime_99.9th_percentile": 0, 
"GetBlockBytesScanned_num_ops": 0, "GetBlockBytesScanned_min": 0, "GetBlockBytesScanned_max": 0, "GetBlockBytesScanned_mean": 0, "GetBlockBytesScanned_25th_percentile": 0, "GetBlockBytesScanned_median": 0, "GetBlockBytesScanned_75th_percentile": 0, "GetBlockBytesScanned_90th_percentile": 0, "GetBlockBytesScanned_95th_percentile": 0, "GetBlockBytesScanned_98th_percentile": 0, "GetBlockBytesScanned_99th_percentile": 0, "GetBlockBytesScanned_99.9th_percentile": 0, "majorCompactedInputBytes": 0, "slowAppendCount": 0, "flushedOutputBytes": 0, "Replay_num_ops": 0, "Replay_min": 0, "Replay_max": 0, "Replay_mean": 0, "Replay_25th_percentile": 0, "Replay_median": 0, "Replay_75th_percentile": 0, "Replay_90th_percentile": 0, "Replay_95th_percentile": 0, "Replay_98th_percentile": 0, "Replay_99th_percentile": 0, "Replay_99.9th_percentile": 0, "MajorCompactionInputSize_num_ops": 0, "MajorCompactionInputSize_min": 0, "MajorCompactionInputSize_max": 0, "MajorCompactionInputSize_mean": 0, "MajorCompactionInputSize_25th_percentile": 0, "MajorCompactionInputSize_median": 0, "MajorCompactionInputSize_75th_percentile": 0, "MajorCompactionInputSize_90th_percentile": 0, "MajorCompactionInputSize_95th_percentile": 0, "MajorCompactionInputSize_98th_percentile": 0, "MajorCompactionInputSize_99th_percentile": 0, "MajorCompactionInputSize_99.9th_percentile": 0, "pauseInfoThresholdExceeded": 0, "CheckAndDelete_num_ops": 0, "CheckAndDelete_min": 0, "CheckAndDelete_max": 0, "CheckAndDelete_mean": 0, "CheckAndDelete_25th_percentile": 0, "CheckAndDelete_median": 0, "CheckAndDelete_75th_percentile": 0, "CheckAndDelete_90th_percentile": 0, "CheckAndDelete_95th_percentile": 0, "CheckAndDelete_98th_percentile": 0, "CheckAndDelete_99th_percentile": 0, "CheckAndDelete_99.9th_percentile": 0, "CompactionInputSize_num_ops": 0, "CompactionInputSize_min": 0, "CompactionInputSize_max": 0, "CompactionInputSize_mean": 0, "CompactionInputSize_25th_percentile": 0, "CompactionInputSize_median": 0, "CompactionInputSize_75th_percentile": 0, "CompactionInputSize_90th_percentile": 0, "CompactionInputSize_95th_percentile": 0, "CompactionInputSize_98th_percentile": 0, "CompactionInputSize_99th_percentile": 0, "CompactionInputSize_99.9th_percentile": 0, "flushedMemstoreBytes": 0, "majorCompactedOutputBytes": 0, "slowPutCount": 0, "compactedInputBytes": 0, "FlushOutputSize_num_ops": 0, "FlushOutputSize_min": 0, "FlushOutputSize_max": 0, "FlushOutputSize_mean": 0, "FlushOutputSize_25th_percentile": 0, "FlushOutputSize_median": 0, "FlushOutputSize_75th_percentile": 0, "FlushOutputSize_90th_percentile": 0, "FlushOutputSize_95th_percentile": 0, "FlushOutputSize_98th_percentile": 0, "FlushOutputSize_99th_percentile": 0, "FlushOutputSize_99.9th_percentile": 0, "PauseTimeWithGc_num_ops": 0, "PauseTimeWithGc_min": 0, "PauseTimeWithGc_max": 0, "PauseTimeWithGc_mean": 0, "PauseTimeWithGc_25th_percentile": 0, "PauseTimeWithGc_median": 0, "PauseTimeWithGc_75th_percentile": 0, "PauseTimeWithGc_90th_percentile": 0, "PauseTimeWithGc_95th_percentile": 0, "PauseTimeWithGc_98th_percentile": 0, "PauseTimeWithGc_99th_percentile": 0, "PauseTimeWithGc_99.9th_percentile": 0, "compactedOutputBytes": 0, "pauseWarnThresholdExceeded": 0, "ScanBlockBytesScanned_num_ops": 0, "ScanBlockBytesScanned_min": 0, "ScanBlockBytesScanned_max": 0, "ScanBlockBytesScanned_mean": 0, "ScanBlockBytesScanned_25th_percentile": 0, "ScanBlockBytesScanned_median": 0, "ScanBlockBytesScanned_75th_percentile": 0, "ScanBlockBytesScanned_90th_percentile": 0, "ScanBlockBytesScanned_95th_percentile": 
0, "ScanBlockBytesScanned_98th_percentile": 0, "ScanBlockBytesScanned_99th_percentile": 0, "ScanBlockBytesScanned_99.9th_percentile": 0, "Increment_num_ops": 0, "Increment_min": 0, "Increment_max": 0, "Increment_mean": 0, "Increment_25th_percentile": 0, "Increment_median": 0, "Increment_75th_percentile": 0, "Increment_90th_percentile": 0, "Increment_95th_percentile": 0, "Increment_98th_percentile": 0, "Increment_99th_percentile": 0, "Increment_99.9th_percentile": 0, "Delete_num_ops": 0, "Delete_min": 0, "Delete_max": 0, "Delete_mean": 0, "Delete_25th_percentile": 0, "Delete_median": 0, "Delete_75th_percentile": 0, "Delete_90th_percentile": 0, "Delete_95th_percentile": 0, "Delete_98th_percentile": 0, "Delete_99th_percentile": 0, "Delete_99.9th_percentile": 0, "DeleteBatch_num_ops": 0, "DeleteBatch_min": 0, "DeleteBatch_max": 0, "DeleteBatch_mean": 0, "DeleteBatch_25th_percentile": 0, "DeleteBatch_median": 0, "DeleteBatch_75th_percentile": 0, "DeleteBatch_90th_percentile": 0, "DeleteBatch_95th_percentile": 0, "DeleteBatch_98th_percentile": 0, "DeleteBatch_99th_percentile": 0, "DeleteBatch_99.9th_percentile": 0, "blockBytesScannedCount": 0, "FlushMemstoreSize_num_ops": 0, "FlushMemstoreSize_min": 0, "FlushMemstoreSize_max": 0, "FlushMemstoreSize_mean": 0, "FlushMemstoreSize_25th_percentile": 0, "FlushMemstoreSize_median": 0, "FlushMemstoreSize_75th_percentile": 0, "FlushMemstoreSize_90th_percentile": 0, "FlushMemstoreSize_95th_percentile": 0, "FlushMemstoreSize_98th_percentile": 0, "FlushMemstoreSize_99th_percentile": 0, "FlushMemstoreSize_99.9th_percentile": 0, "CompactionInputFileCount_num_ops": 0, "CompactionInputFileCount_min": 0, "CompactionInputFileCount_max": 0, "CompactionInputFileCount_mean": 0, "CompactionInputFileCount_25th_percentile": 0, "CompactionInputFileCount_median": 0, "CompactionInputFileCount_75th_percentile": 0, "CompactionInputFileCount_90th_percentile": 0, "CompactionInputFileCount_95th_percentile": 0, "CompactionInputFileCount_98th_percentile": 0, "CompactionInputFileCount_99th_percentile": 0, "CompactionInputFileCount_99.9th_percentile": 0, "CompactionTime_num_ops": 0, "CompactionTime_min": 0, "CompactionTime_max": 0, "CompactionTime_mean": 0, "CompactionTime_25th_percentile": 0, "CompactionTime_median": 0, "CompactionTime_75th_percentile": 0, "CompactionTime_90th_percentile": 0, "CompactionTime_95th_percentile": 0, "CompactionTime_98th_percentile": 0, "CompactionTime_99th_percentile": 0, "CompactionTime_99.9th_percentile": 0, "Get_num_ops": 0, "Get_min": 0, "Get_max": 0, "Get_mean": 0, "Get_25th_percentile": 0, "Get_median": 0, "Get_75th_percentile": 0, "Get_90th_percentile": 0, "Get_95th_percentile": 0, "Get_98th_percentile": 0, "Get_99th_percentile": 0, "Get_99.9th_percentile": 0, "MajorCompactionInputFileCount_num_ops": 0, "MajorCompactionInputFileCount_min": 0, "MajorCompactionInputFileCount_max": 0, "MajorCompactionInputFileCount_mean": 0, "MajorCompactionInputFileCount_25th_percentile": 0, "MajorCompactionInputFileCount_median": 0, "MajorCompactionInputFileCount_75th_percentile": 0, "MajorCompactionInputFileCount_90th_percentile": 0, "MajorCompactionInputFileCount_95th_percentile": 0, "MajorCompactionInputFileCount_98th_percentile": 0, "MajorCompactionInputFileCount_99th_percentile": 0, "MajorCompactionInputFileCount_99.9th_percentile": 0, "scannerLeaseExpiredCount": 0, "CheckAndPut_num_ops": 0, "CheckAndPut_min": 0, "CheckAndPut_max": 0, "CheckAndPut_mean": 0, "CheckAndPut_25th_percentile": 0, "CheckAndPut_median": 0, "CheckAndPut_75th_percentile": 0, 
"CheckAndPut_90th_percentile": 0, "CheckAndPut_95th_percentile": 0, "CheckAndPut_98th_percentile": 0, "CheckAndPut_99th_percentile": 0, "CheckAndPut_99.9th_percentile": 0, "MajorCompactionOutputSize_num_ops": 0, "MajorCompactionOutputSize_min": 0, "MajorCompactionOutputSize_max": 0, "MajorCompactionOutputSize_mean": 0, "MajorCompactionOutputSize_25th_percentile": 0, "MajorCompactionOutputSize_median": 0, "MajorCompactionOutputSize_75th_percentile": 0, "MajorCompactionOutputSize_90th_percentile": 0, "MajorCompactionOutputSize_95th_percentile": 0, "MajorCompactionOutputSize_98th_percentile": 0, "MajorCompactionOutputSize_99th_percentile": 0, "MajorCompactionOutputSize_99.9th_percentile": 0, "CompactionOutputFileCount_num_ops": 0, "CompactionOutputFileCount_min": 0, "CompactionOutputFileCount_max": 0, "CompactionOutputFileCount_mean": 0, "CompactionOutputFileCount_25th_percentile": 0, "CompactionOutputFileCount_median": 0, "CompactionOutputFileCount_75th_percentile": 0, "CompactionOutputFileCount_90th_percentile": 0, "CompactionOutputFileCount_95th_percentile": 0, "CompactionOutputFileCount_98th_percentile": 0, "CompactionOutputFileCount_99th_percentile": 0, "CompactionOutputFileCount_99.9th_percentile": 0, "slowDeleteCount": 0, "FlushTime_num_ops": 0, "FlushTime_min": 0, "FlushTime_max": 0, "FlushTime_mean": 0, "FlushTime_25th_percentile": 0, "FlushTime_median": 0, "FlushTime_75th_percentile": 0, "FlushTime_90th_percentile": 0, "FlushTime_95th_percentile": 0, "FlushTime_98th_percentile": 0, "FlushTime_99th_percentile": 0, "FlushTime_99.9th_percentile": 0, "splitSuccessCount": 0, "MajorCompactionOutputFileCount_num_ops": 0, "MajorCompactionOutputFileCount_min": 0, "MajorCompactionOutputFileCount_max": 0, "MajorCompactionOutputFileCount_mean": 0, "MajorCompactionOutputFileCount_25th_percentile": 0, "MajorCompactionOutputFileCount_median": 0, "MajorCompactionOutputFileCount_75th_percentile": 0, "MajorCompactionOutputFileCount_90th_percentile": 0, "MajorCompactionOutputFileCount_95th_percentile": 0, "MajorCompactionOutputFileCount_98th_percentile": 0, "MajorCompactionOutputFileCount_99th_percentile": 0, "MajorCompactionOutputFileCount_99.9th_percentile": 0, "slowGetCount": 0, "ScanSize_num_ops": 0, "ScanSize_min": 0, "ScanSize_max": 0, "ScanSize_mean": 0, "ScanSize_25th_percentile": 0, "ScanSize_median": 0, "ScanSize_75th_percentile": 0, "ScanSize_90th_percentile": 0, "ScanSize_95th_percentile": 0, "ScanSize_98th_percentile": 0, "ScanSize_99th_percentile": 0, "ScanSize_99.9th_percentile": 0, "CompactionOutputSize_num_ops": 0, "CompactionOutputSize_min": 0, "CompactionOutputSize_max": 0, "CompactionOutputSize_mean": 0, "CompactionOutputSize_25th_percentile": 0, "CompactionOutputSize_median": 0, "CompactionOutputSize_75th_percentile": 0, "CompactionOutputSize_90th_percentile": 0, "CompactionOutputSize_95th_percentile": 0, "CompactionOutputSize_98th_percentile": 0, "CompactionOutputSize_99th_percentile": 0, "CompactionOutputSize_99.9th_percentile": 0, "PauseTimeWithoutGc_num_ops": 0, "PauseTimeWithoutGc_min": 0, "PauseTimeWithoutGc_max": 0, "PauseTimeWithoutGc_mean": 0, "PauseTimeWithoutGc_25th_percentile": 0, "PauseTimeWithoutGc_median": 0, "PauseTimeWithoutGc_75th_percentile": 0, "PauseTimeWithoutGc_90th_percentile": 0, "PauseTimeWithoutGc_95th_percentile": 0, "PauseTimeWithoutGc_98th_percentile": 0, "PauseTimeWithoutGc_99th_percentile": 0, "PauseTimeWithoutGc_99.9th_percentile": 0, "slowIncrementCount": 0, "Append_num_ops": 0, "Append_min": 0, "Append_max": 0, "Append_mean": 0, 
"Append_25th_percentile": 0, "Append_median": 0, "Append_75th_percentile": 0, "Append_90th_percentile": 0, "Append_95th_percentile": 0, "Append_98th_percentile": 0, "Append_99th_percentile": 0, "Append_99.9th_percentile": 0, "Bulkload_count": 0, "Bulkload_mean_rate": 0.0, "Bulkload_1min_rate": 0.0, "Bulkload_5min_rate": 0.0, "Bulkload_15min_rate": 0.0, "Bulkload_num_ops": 0, "Bulkload_min": 0, "Bulkload_max": 0, "Bulkload_mean": 0, "Bulkload_25th_percentile": 0, "Bulkload_median": 0, "Bulkload_75th_percentile": 0, "Bulkload_90th_percentile": 0, "Bulkload_95th_percentile": 0, "Bulkload_98th_percentile": 0, "Bulkload_99th_percentile": 0, "Bulkload_99.9th_percentile": 0 } ] } 2024-12-10T16:34:38,782 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42829 {}] master.MasterRpcServices(700): 4b7737f37de9,35753,1733848461832 reported a fatal error: ***** ABORTING region server 4b7737f37de9,35753,1733848461832: testing ***** 2024-12-10T16:34:38,785 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '4b7737f37de9,35753,1733848461832' ***** 2024-12-10T16:34:38,785 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: testing 2024-12-10T16:34:38,786 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T16:34:38,786 INFO [RS:0;4b7737f37de9:35753 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager abruptly. 2024-12-10T16:34:38,786 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T16:34:38,786 INFO [RS:0;4b7737f37de9:35753 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager abruptly. 2024-12-10T16:34:38,786 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.HRegionServer(3091): Received CLOSE for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:38,787 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.HRegionServer(956): aborting server 4b7737f37de9,35753,1733848461832 2024-12-10T16:34:38,787 INFO [RS:0;4b7737f37de9:35753 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T16:34:38,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40043 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Get size: 140 connection: 172.17.0.3:51350 deadline: 1733848538787, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=4b7737f37de9 port=35753 startCode=1733848461832. As of locationSeqNum=12. 2024-12-10T16:34:38,787 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3c4a0fab46c2c0263b38e45209989070, disabling compactions & flushes 2024-12-10T16:34:38,787 INFO [RS:0;4b7737f37de9:35753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;4b7737f37de9:35753. 2024-12-10T16:34:38,787 INFO [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:38,787 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 
2024-12-10T16:34:38,787 DEBUG [RS:0;4b7737f37de9:35753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T16:34:38,787 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. after waiting 0 ms 2024-12-10T16:34:38,787 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:38,788 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,40043,1733848461924, seqNum=5 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,40043,1733848461924, seqNum=5, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=4b7737f37de9 port=35753 startCode=1733848461832. As of locationSeqNum=12. 2024-12-10T16:34:38,788 DEBUG [RS:0;4b7737f37de9:35753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T16:34:38,788 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,40043,1733848461924, seqNum=5 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=4b7737f37de9 port=35753 startCode=1733848461832. As of locationSeqNum=12. 
2024-12-10T16:34:38,788 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,40043,1733848461924, seqNum=5 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,35753,1733848461832, seqNum=12 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=4b7737f37de9 port=35753 startCode=1733848461832. As of locationSeqNum=12. 2024-12-10T16:34:38,788 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-10T16:34:38,788 DEBUG [RS:0;4b7737f37de9:35753 {}] regionserver.HRegionServer(1325): Online Regions={3c4a0fab46c2c0263b38e45209989070=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.} 2024-12-10T16:34:38,789 DEBUG [RS:0;4b7737f37de9:35753 {}] regionserver.HRegionServer(1351): Waiting on 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:38,790 INFO [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:38,790 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3c4a0fab46c2c0263b38e45209989070: Waiting for close lock at 1733848478787Running coprocessor pre-close hooks at 1733848478787Disabling compacts and flushes for region at 1733848478787Disabling writes for close at 1733848478787Writing region close event to WAL at 1733848478790 (+3 ms)Running coprocessor post-close hooks at 1733848478790Closed at 1733848478790 2024-12-10T16:34:38,791 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:38,843 INFO [regionserver/4b7737f37de9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T16:34:38,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 4b7737f37de9,35753,1733848461832 aborting at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processRequest(ServerRpcConnection.java:564) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processOneRpc(ServerRpcConnection.java:364) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyServerRpcConnection.process(NettyServerRpcConnection.java:89) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:56) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:31) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:99) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
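The RegionServerAbortedException above is what clients still holding the old cached location see; the AsyncRegionLocatorHelper lines show that cache entry being dropped so the next attempt goes back to hbase:meta. A rough sketch of what the reading side of such a test might look like with the synchronous client follows; the row key 'r1' and family cf1 appear in this log, but everything else (quorum, qualifier-free family read) is an assumption, not the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadAfterMoveSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumption: quorum copied from the log above.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:53765");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))) {
      Get get = new Get(Bytes.toBytes("r1"));
      get.addFamily(Bytes.toBytes("cf1"));
      // The client retries internally: RegionMovedException / RegionServerAbortedException
      // invalidate the cached location (as logged above) and the next attempt re-reads
      // hbase:meta for the current assignment before reissuing the Get.
      Result result = table.get(get);
      System.out.println("cells for r1/cf1: " + result.size());
    }
  }
}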
2024-12-10T16:34:38,900 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,35753,1733848461832, seqNum=12 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,35753,1733848461832, seqNum=12, error=org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 4b7737f37de9,35753,1733848461832 aborting 2024-12-10T16:34:38,900 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,35753,1733848461832, seqNum=12 is org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 4b7737f37de9,35753,1733848461832 aborting 2024-12-10T16:34:38,900 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,35753,1733848461832, seqNum=12 from cache 2024-12-10T16:34:38,989 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.HRegionServer(976): stopping server 4b7737f37de9,35753,1733848461832; all regions closed. 2024-12-10T16:34:38,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741835_1011 (size=1408) 2024-12-10T16:34:38,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741835_1011 (size=1408) 2024-12-10T16:34:38,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741835_1011 (size=1408) 2024-12-10T16:34:38,994 DEBUG [RS:0;4b7737f37de9:35753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T16:34:38,994 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T16:34:38,994 INFO [RS:0;4b7737f37de9:35753 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T16:34:38,995 INFO [RS:0;4b7737f37de9:35753 {}] hbase.ChoreService(370): Chore service for: regionserver/4b7737f37de9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-10T16:34:38,995 INFO [regionserver/4b7737f37de9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-10T16:34:38,995 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T16:34:38,995 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T16:34:38,995 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-10T16:34:38,995 INFO [RS:0;4b7737f37de9:35753 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T16:34:38,995 INFO [RS:0;4b7737f37de9:35753 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:35753 2024-12-10T16:34:39,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x10010af1c860001, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4b7737f37de9,35753,1733848461832 2024-12-10T16:34:39,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T16:34:39,029 INFO [RS:0;4b7737f37de9:35753 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T16:34:39,030 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4b7737f37de9,35753,1733848461832] 2024-12-10T16:34:39,045 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4b7737f37de9,35753,1733848461832 already deleted, retry=false 2024-12-10T16:34:39,046 INFO [RegionServerTracker-0 {}] master.ServerManager(695): Processing expiration of 4b7737f37de9,35753,1733848461832 on 4b7737f37de9,42829,1733848461149 2024-12-10T16:34:39,052 DEBUG [RegionServerTracker-0 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=false; ServerCrashProcedure 4b7737f37de9,35753,1733848461832, splitWal=true, meta=false 2024-12-10T16:34:39,056 INFO [RegionServerTracker-0 {}] assignment.AssignmentManager(1999): Scheduled ServerCrashProcedure pid=13 for 4b7737f37de9,35753,1733848461832 (carryingMeta=false) 4b7737f37de9,35753,1733848461832/CRASHED/regionCount=1/lock=java.util.concurrent.locks.ReentrantReadWriteLock@16691d29[Write locks = 1, Read locks = 0], oldState=ONLINE. 
2024-12-10T16:34:39,056 INFO [PEWorker-2 {}] procedure.ServerCrashProcedure(169): Start pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=true; ServerCrashProcedure 4b7737f37de9,35753,1733848461832, splitWal=true, meta=false 2024-12-10T16:34:39,059 INFO [PEWorker-2 {}] procedure.ServerCrashProcedure(207): 4b7737f37de9,35753,1733848461832 had 1 regions 2024-12-10T16:34:39,060 INFO [PEWorker-2 {}] procedure.ServerCrashProcedure(339): Splitting WALs pid=13, state=RUNNABLE:SERVER_CRASH_SPLIT_LOGS, hasLock=true; ServerCrashProcedure 4b7737f37de9,35753,1733848461832, splitWal=true, meta=false, isMeta: false 2024-12-10T16:34:39,062 DEBUG [PEWorker-2 {}] master.MasterWalManager(329): Renamed region directory: hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,35753,1733848461832-splitting 2024-12-10T16:34:39,064 INFO [PEWorker-2 {}] master.SplitWALManager(105): 4b7737f37de9,35753,1733848461832 WAL count=1, meta=false 2024-12-10T16:34:39,066 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE:ACQUIRE_SPLIT_WAL_WORKER, hasLock=false; SplitWALProcedure 4b7737f37de9%2C35753%2C1733848461832.1733848463581}] 2024-12-10T16:34:39,072 DEBUG [PEWorker-1 {}] master.SplitWALManager(158): Acquired split WAL worker=4b7737f37de9,40043,1733848461924 2024-12-10T16:34:39,074 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE, hasLock=false; SplitWALRemoteProcedure 4b7737f37de9%2C35753%2C1733848461832.1733848463581, worker=4b7737f37de9,40043,1733848461924}] 2024-12-10T16:34:39,112 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,35753,1733848461832, seqNum=18] 2024-12-10T16:34:39,114 WARN [RPCClient-NioEventLoopGroup-6-5 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server 4b7737f37de9:35753 org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: 4b7737f37de9/172.17.0.3:35753 Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel.doFinishConnect(NioSocketChannel.java:336) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.finishConnect(AbstractNioChannel.java:339) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:776) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T16:34:39,115 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,35753,1733848461832, seqNum=18 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,35753,1733848461832, seqNum=18, error=java.net.ConnectException: Call to address=4b7737f37de9:35753 failed on connection exception: org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: 4b7737f37de9/172.17.0.3:35753 2024-12-10T16:34:39,116 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,35753,1733848461832, seqNum=18 is java.net.ConnectException: Connection refused 2024-12-10T16:34:39,116 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,35753,1733848461832, seqNum=18 from cache 2024-12-10T16:34:39,116 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.FailedServers(52): Added failed server with address 4b7737f37de9:35753 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: 4b7737f37de9/172.17.0.3:35753 2024-12-10T16:34:39,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x10010af1c860001, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T16:34:39,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35753-0x10010af1c860001, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T16:34:39,138 INFO [RS:0;4b7737f37de9:35753 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T16:34:39,138 INFO [RS:0;4b7737f37de9:35753 {}] regionserver.HRegionServer(1031): Exiting; stopping=4b7737f37de9,35753,1733848461832; zookeeper connection closed. 
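The SplitWALProcedure that follows reads the dead server's WAL out of the "-splitting" directory and replays its edits into per-region recovered.edits files. A small sketch of inspecting that directory with the plain HDFS client is shown below; the NameNode address and test-data paths are copied from this log as assumptions and would differ on any other cluster.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListSplittingWalsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumption: NameNode URI and test-data root copied from the log above.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:35477"), conf);
    Path splitting = new Path(
        "/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/"
            + "4b7737f37de9,35753,1733848461832-splitting");
    // WALs queued for splitting by the ServerCrashProcedure for this dead server.
    for (FileStatus wal : fs.listStatus(splitting)) {
      System.out.println(wal.getPath() + " " + wal.getLen() + " bytes");
    }
    fs.close();
  }
}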
2024-12-10T16:34:39,139 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@a5b3599 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@a5b3599 2024-12-10T16:34:39,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40043 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SplitWALCallable, pid=15 2024-12-10T16:34:39,257 INFO [RS_LOG_REPLAY_OPS-regionserver/4b7737f37de9:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(299): Splitting hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,35753,1733848461832-splitting/4b7737f37de9%2C35753%2C1733848461832.1733848463581, size=1.4 K (1408bytes) 2024-12-10T16:34:39,257 INFO [RS_LOG_REPLAY_OPS-regionserver/4b7737f37de9:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,35753,1733848461832-splitting/4b7737f37de9%2C35753%2C1733848461832.1733848463581 2024-12-10T16:34:39,257 INFO [RS_LOG_REPLAY_OPS-regionserver/4b7737f37de9:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,35753,1733848461832-splitting/4b7737f37de9%2C35753%2C1733848461832.1733848463581 after 0ms 2024-12-10T16:34:39,260 DEBUG [RS_LOG_REPLAY_OPS-regionserver/4b7737f37de9:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,35753,1733848461832-splitting/4b7737f37de9%2C35753%2C1733848461832.1733848463581: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:39,260 INFO [RS_LOG_REPLAY_OPS-regionserver/4b7737f37de9:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(310): Open hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,35753,1733848461832-splitting/4b7737f37de9%2C35753%2C1733848461832.1733848463581 took 4ms 2024-12-10T16:34:39,266 DEBUG [RS_LOG_REPLAY_OPS-regionserver/4b7737f37de9:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(352): Last flushed sequenceid for 3c4a0fab46c2c0263b38e45209989070: last_flushed_sequence_id: 12 store_sequence_id { family_name: "cf1" sequence_id: 12 } store_sequence_id { family_name: "cf2" sequence_id: 12 } 2024-12-10T16:34:39,267 DEBUG [RS_LOG_REPLAY_OPS-regionserver/4b7737f37de9:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,35753,1733848461832-splitting/4b7737f37de9%2C35753%2C1733848461832.1733848463581 so closing down 2024-12-10T16:34:39,267 DEBUG [RS_LOG_REPLAY_OPS-regionserver/4b7737f37de9:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T16:34:39,267 INFO [RS_LOG_REPLAY_OPS-regionserver/4b7737f37de9:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T16:34:39,267 INFO [RS_LOG_REPLAY_OPS-regionserver/4b7737f37de9:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(425): Processed 6 edits across 0 Regions in 
7 ms; skipped=6; WAL=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,35753,1733848461832-splitting/4b7737f37de9%2C35753%2C1733848461832.1733848463581, size=1.4 K, length=1408, corrupted=false, cancelled=false 2024-12-10T16:34:39,267 DEBUG [RS_LOG_REPLAY_OPS-regionserver/4b7737f37de9:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(428): Completed split of hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,35753,1733848461832-splitting/4b7737f37de9%2C35753%2C1733848461832.1733848463581, journal: Splitting hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,35753,1733848461832-splitting/4b7737f37de9%2C35753%2C1733848461832.1733848463581, size=1.4 K (1408bytes) at 1733848479257Finishing writing output for hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,35753,1733848461832-splitting/4b7737f37de9%2C35753%2C1733848461832.1733848463581 so closing down at 1733848479267 (+10 ms)3 split writer threads finished at 1733848479267Processed 6 edits across 0 Regions in 7 ms; skipped=6; WAL=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,35753,1733848461832-splitting/4b7737f37de9%2C35753%2C1733848461832.1733848463581, size=1.4 K, length=1408, corrupted=false, cancelled=false at 1733848479267 2024-12-10T16:34:39,267 DEBUG [RS_LOG_REPLAY_OPS-regionserver/4b7737f37de9:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.SplitLogWorker(218): Done splitting WAL hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,35753,1733848461832-splitting/4b7737f37de9%2C35753%2C1733848461832.1733848463581 2024-12-10T16:34:39,268 DEBUG [RS_LOG_REPLAY_OPS-regionserver/4b7737f37de9:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-10T16:34:39,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42829 {}] master.HMaster(4169): Remote procedure done, pid=15 2024-12-10T16:34:39,274 INFO [PEWorker-4 {}] wal.WALSplitUtil(143): Moved hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,35753,1733848461832-splitting/4b7737f37de9%2C35753%2C1733848461832.1733848463581 to hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/oldWALs 2024-12-10T16:34:39,277 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=15, resume processing ppid=14 2024-12-10T16:34:39,277 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, ppid=14, state=SUCCESS, hasLock=false; SplitWALRemoteProcedure 4b7737f37de9%2C35753%2C1733848461832.1733848463581, worker=4b7737f37de9,40043,1733848461924 in 200 msec 2024-12-10T16:34:39,278 DEBUG [PEWorker-5 {}] master.SplitWALManager(172): Release split WAL worker=4b7737f37de9,40043,1733848461924 2024-12-10T16:34:39,281 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-12-10T16:34:39,281 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; SplitWALProcedure 4b7737f37de9%2C35753%2C1733848461832.1733848463581, worker=4b7737f37de9,40043,1733848461924 in 213 msec 2024-12-10T16:34:39,283 INFO [PEWorker-2 {}] master.SplitLogManager(171): 
hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,35753,1733848461832-splitting dir is empty, no logs to split. 2024-12-10T16:34:39,283 INFO [PEWorker-2 {}] master.SplitWALManager(105): 4b7737f37de9,35753,1733848461832 WAL count=0, meta=false 2024-12-10T16:34:39,283 DEBUG [PEWorker-2 {}] procedure.ServerCrashProcedure(329): Check if 4b7737f37de9,35753,1733848461832 WAL splitting is done? wals=0, meta=false 2024-12-10T16:34:39,285 INFO [PEWorker-2 {}] procedure.ServerCrashProcedure(321): Remove WAL directory for 4b7737f37de9,35753,1733848461832 failed, ignore...File hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/WALs/4b7737f37de9,35753,1733848461832-splitting does not exist. 2024-12-10T16:34:39,287 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, ASSIGN}] 2024-12-10T16:34:39,289 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, ASSIGN 2024-12-10T16:34:39,290 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, ASSIGN; state=OPEN, location=null; forceNewPlan=true, retain=false 2024-12-10T16:34:39,430 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,35753,1733848461832, seqNum=18] 2024-12-10T16:34:39,431 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.AbstractRpcClient(357): Not trying to connect to 4b7737f37de9:35753 this server is in the failed servers list 2024-12-10T16:34:39,431 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,35753,1733848461832, seqNum=18 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,35753,1733848461832, seqNum=18, error=org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=4b7737f37de9:35753 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 4b7737f37de9:35753 2024-12-10T16:34:39,431 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,35753,1733848461832, seqNum=18 is org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 4b7737f37de9:35753 2024-12-10T16:34:39,432 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(88): Try removing 
region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,35753,1733848461832, seqNum=18 from cache 2024-12-10T16:34:39,441 DEBUG [4b7737f37de9:42829 {}] balancer.BalancerClusterState(204): Hosts are {4b7737f37de9=0} racks are {/default-rack=0} 2024-12-10T16:34:39,441 DEBUG [4b7737f37de9:42829 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-10T16:34:39,441 DEBUG [4b7737f37de9:42829 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-10T16:34:39,441 DEBUG [4b7737f37de9:42829 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-10T16:34:39,441 DEBUG [4b7737f37de9:42829 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-10T16:34:39,441 INFO [4b7737f37de9:42829 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-10T16:34:39,441 INFO [4b7737f37de9:42829 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-10T16:34:39,441 DEBUG [4b7737f37de9:42829 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T16:34:39,441 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=3c4a0fab46c2c0263b38e45209989070, regionState=OPENING, regionLocation=4b7737f37de9,44673,1733848461969 2024-12-10T16:34:39,444 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, ASSIGN because future has completed 2024-12-10T16:34:39,444 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3c4a0fab46c2c0263b38e45209989070, server=4b7737f37de9,44673,1733848461969}] 2024-12-10T16:34:39,597 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T16:34:39,600 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49337, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T16:34:39,609 INFO [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 
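From an application's point of view, none of the location-cache invalidation or failed-server bookkeeping above needs to be driven by user code: the read is simply reissued and the client re-locates the region from hbase:meta once it is reassigned, as the entries that follow show. A minimal sketch of that application-level view, assuming a standard HBase client picking up its cluster settings from hbase-site.xml; the table name and row come from the log, everything else is illustrative and not part of the test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadAfterRegionMove {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))) {
            // Row 'r1' from the log; if the cached location points at a dead server the client
            // clears the stale entry and locates the region again before retrying the call.
            Result result = table.get(new Get(Bytes.toBytes("r1")));
            System.out.println("cells returned: " + result.size());
        }
    }
}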
2024-12-10T16:34:39,609 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7752): Opening region: {ENCODED => 3c4a0fab46c2c0263b38e45209989070, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:39,610 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:39,610 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:39,610 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7794): checking encryption for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:39,610 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7797): checking classloading for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:39,612 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:39,613 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c4a0fab46c2c0263b38e45209989070 columnFamilyName cf1 2024-12-10T16:34:39,613 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:39,621 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf1/452cb5bb11a747e58753c9152983a4f8 2024-12-10T16:34:39,621 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(327): Store=3c4a0fab46c2c0263b38e45209989070/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:39,621 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:39,622 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c4a0fab46c2c0263b38e45209989070 columnFamilyName cf2 2024-12-10T16:34:39,622 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:39,629 DEBUG [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/cf2/5073f024fb8947f1ba78b2876d1b4660 2024-12-10T16:34:39,629 INFO [StoreOpener-3c4a0fab46c2c0263b38e45209989070-1 {}] regionserver.HStore(327): Store=3c4a0fab46c2c0263b38e45209989070/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:39,629 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1038): replaying wal for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:39,630 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:39,632 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:39,632 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1048): stopping wal replay for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:39,632 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1060): Cleaning up temporary data for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:39,633 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 
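The FlushLargeStoresPolicy entry above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, so the per-family flush threshold falls back to the region memstore flush size divided by the number of families. A hedged sketch of setting that property explicitly on a table descriptor is shown below; the property name is taken from the log line, while the 16 MB value and the helper class are only examples.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushPolicyExample {
    public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf2"))
            // When present, FlushLargeStoresPolicy uses this lower bound instead of
            // memstore-flush-size / number-of-families; 16 MB here is purely an example value.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound", String.valueOf(16L * 1024 * 1024))
            .build();
        // Pass the descriptor to Admin.createTable(...) or Admin.modifyTable(...) to apply it.
    }
}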
2024-12-10T16:34:39,635 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1093): writing seq id for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:39,636 INFO [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1114): Opened 3c4a0fab46c2c0263b38e45209989070; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69989981, jitterRate=0.04293198883533478}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-10T16:34:39,636 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:39,637 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1006): Region open journal for 3c4a0fab46c2c0263b38e45209989070: Running coprocessor pre-open hook at 1733848479610Writing region info on filesystem at 1733848479610Initializing all the Stores at 1733848479612 (+2 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848479612Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848479612Cleaning up temporary data from old regions at 1733848479633 (+21 ms)Running coprocessor post-open hooks at 1733848479636 (+3 ms)Region opened successfully at 1733848479637 (+1 ms) 2024-12-10T16:34:39,638 INFO [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., pid=17, masterSystemTime=1733848479597 2024-12-10T16:34:39,640 DEBUG [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:39,641 INFO [RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 
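The split policy printed above (desiredMaxFileSize=69989981, jitterRate=0.04293198883533478) is consistent with a jittered threshold of base * (1 + jitterRate) over an assumed 64 MB (67108864-byte) base split size. The small check below reproduces the logged figure under that assumption; it is an arithmetic sanity check, not the policy's actual implementation.

public class SplitSizeJitterCheck {
    public static void main(String[] args) {
        long base = 67108864L;                        // assumed 64 MB base split size
        double jitterRate = 0.04293198883533478;      // jitterRate from the log line above
        long desired = base + Math.round(base * jitterRate);
        System.out.println(desired);                  // ~69989981, consistent with desiredMaxFileSize
    }
}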
2024-12-10T16:34:39,641 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=3c4a0fab46c2c0263b38e45209989070, regionState=OPEN, openSeqNum=18, regionLocation=4b7737f37de9,44673,1733848461969 2024-12-10T16:34:39,643 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=17, ppid=16, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3c4a0fab46c2c0263b38e45209989070, server=4b7737f37de9,44673,1733848461969 because future has completed 2024-12-10T16:34:39,647 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=16 2024-12-10T16:34:39,647 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=16, state=SUCCESS, hasLock=false; OpenRegionProcedure 3c4a0fab46c2c0263b38e45209989070, server=4b7737f37de9,44673,1733848461969 in 201 msec 2024-12-10T16:34:39,649 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=13 2024-12-10T16:34:39,649 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=13, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=3c4a0fab46c2c0263b38e45209989070, ASSIGN in 360 msec 2024-12-10T16:34:39,649 INFO [PEWorker-5 {}] procedure.ServerCrashProcedure(291): removed crashed server 4b7737f37de9,35753,1733848461832 after splitting done 2024-12-10T16:34:39,651 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; ServerCrashProcedure 4b7737f37de9,35753,1733848461832, splitWal=true, meta=false in 602 msec 2024-12-10T16:34:39,944 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070., hostname=4b7737f37de9,44673,1733848461969, seqNum=18] 2024-12-10T16:34:39,945 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T16:34:39,948 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54044, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T16:34:39,964 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterRegionMovedWithMultiCF Thread=415 (was 414) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Abort regionserver monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-588849337_22 at /127.0.0.1:59994 [Waiting for operation #25] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_LOG_REPLAY_OPS-regionserver/4b7737f37de9:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-401009118_22 at /127.0.0.1:34656 [Waiting for operation #22] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/4b7737f37de9:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-676385714_22 at /127.0.0.1:55020 [Waiting for operation #22] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1053 (was 1024) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=178 (was 176) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4859 (was 4917) 2024-12-10T16:34:39,966 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1053 is superior to 1024 2024-12-10T16:34:39,979 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterPartialFlush Thread=415, OpenFileDescriptor=1053, MaxFileDescriptor=1048576, SystemLoadAverage=178, ProcessCount=11, AvailableMemoryMB=4859 2024-12-10T16:34:39,979 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1053 is superior to 1024 2024-12-10T16:34:39,995 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:39,996 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:39,997 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T16:34:39,999 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-70588895, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/hregion-70588895, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:40,010 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-70588895/hregion-70588895.1733848479999, exclude list is [], retry=0 2024-12-10T16:34:40,012 DEBUG [AsyncFSWAL-20-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:40,012 DEBUG [AsyncFSWAL-20-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:40,013 DEBUG [AsyncFSWAL-20-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:40,015 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-70588895/hregion-70588895.1733848479999 2024-12-10T16:34:40,015 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:42633:42633)] 2024-12-10T16:34:40,015 INFO 
[Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 049250c0d005b55396dbe65296760c72, NAME => 'testReplayEditsWrittenViaHRegion,,1733848479995.049250c0d005b55396dbe65296760c72.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35477/hbase 2024-12-10T16:34:40,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741893_1071 (size=67) 2024-12-10T16:34:40,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741893_1071 (size=67) 2024-12-10T16:34:40,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741893_1071 (size=67) 2024-12-10T16:34:40,027 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733848479995.049250c0d005b55396dbe65296760c72.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:40,029 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,030 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 049250c0d005b55396dbe65296760c72 columnFamilyName a 2024-12-10T16:34:40,030 DEBUG [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:40,031 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 
{}] regionserver.HStore(327): Store=049250c0d005b55396dbe65296760c72/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:40,031 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,032 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 049250c0d005b55396dbe65296760c72 columnFamilyName b 2024-12-10T16:34:40,032 DEBUG [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:40,033 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] regionserver.HStore(327): Store=049250c0d005b55396dbe65296760c72/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:40,033 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,034 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 049250c0d005b55396dbe65296760c72 columnFamilyName c 2024-12-10T16:34:40,034 DEBUG [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:40,035 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] regionserver.HStore(327): Store=049250c0d005b55396dbe65296760c72/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:40,035 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,035 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,036 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,037 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,037 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,037 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T16:34:40,038 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,040 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T16:34:40,040 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 049250c0d005b55396dbe65296760c72; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69014285, jitterRate=0.028392985463142395}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T16:34:40,041 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 049250c0d005b55396dbe65296760c72: Writing region info on filesystem at 1733848480027Initializing all the Stores at 1733848480028 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848480028Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848480028Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848480028Cleaning up temporary data from old regions at 1733848480037 (+9 ms)Region opened successfully at 1733848480040 (+3 ms) 2024-12-10T16:34:40,041 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 049250c0d005b55396dbe65296760c72, disabling 
compactions & flushes 2024-12-10T16:34:40,041 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733848479995.049250c0d005b55396dbe65296760c72. 2024-12-10T16:34:40,041 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733848479995.049250c0d005b55396dbe65296760c72. 2024-12-10T16:34:40,041 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733848479995.049250c0d005b55396dbe65296760c72. after waiting 0 ms 2024-12-10T16:34:40,041 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733848479995.049250c0d005b55396dbe65296760c72. 2024-12-10T16:34:40,041 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733848479995.049250c0d005b55396dbe65296760c72. 2024-12-10T16:34:40,041 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 049250c0d005b55396dbe65296760c72: Waiting for close lock at 1733848480041Disabling compacts and flushes for region at 1733848480041Disabling writes for close at 1733848480041Writing region close event to WAL at 1733848480041Closed at 1733848480041 2024-12-10T16:34:40,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741892_1070 (size=95) 2024-12-10T16:34:40,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741892_1070 (size=95) 2024-12-10T16:34:40,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741892_1070 (size=95) 2024-12-10T16:34:40,046 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T16:34:40,046 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-70588895:(num 1733848479999) 2024-12-10T16:34:40,046 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T16:34:40,048 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:40,061 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994/wal.1733848480048, exclude list is [], retry=0 2024-12-10T16:34:40,064 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:40,065 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:40,065 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, 
datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:40,066 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994/wal.1733848480048 2024-12-10T16:34:40,067 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:44417:44417)] 2024-12-10T16:34:40,067 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 049250c0d005b55396dbe65296760c72, NAME => 'testReplayEditsWrittenViaHRegion,,1733848479995.049250c0d005b55396dbe65296760c72.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:40,067 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733848479995.049250c0d005b55396dbe65296760c72.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:40,067 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,067 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,072 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,073 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 049250c0d005b55396dbe65296760c72 columnFamilyName a 2024-12-10T16:34:40,074 DEBUG [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:40,074 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] regionserver.HStore(327): Store=049250c0d005b55396dbe65296760c72/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:40,074 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,075 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 049250c0d005b55396dbe65296760c72 columnFamilyName b 2024-12-10T16:34:40,075 DEBUG [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:40,076 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] regionserver.HStore(327): Store=049250c0d005b55396dbe65296760c72/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:40,076 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,077 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 049250c0d005b55396dbe65296760c72 columnFamilyName c 2024-12-10T16:34:40,077 DEBUG [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:40,077 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] regionserver.HStore(327): Store=049250c0d005b55396dbe65296760c72/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:40,078 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,078 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,079 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,080 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 
049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,080 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,081 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T16:34:40,082 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,083 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 049250c0d005b55396dbe65296760c72; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59253734, jitterRate=-0.11705055832862854}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T16:34:40,083 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 049250c0d005b55396dbe65296760c72: Writing region info on filesystem at 1733848480068Initializing all the Stores at 1733848480069 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848480069Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848480072 (+3 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848480072Cleaning up temporary data from old regions at 1733848480080 (+8 ms)Region opened successfully at 1733848480083 (+3 ms) 2024-12-10T16:34:40,118 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 049250c0d005b55396dbe65296760c72 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-12-10T16:34:40,141 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/.tmp/a/a6be1f0bcc69471682b9571a98cc4081 is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1733848480083/Put/seqid=0 2024-12-10T16:34:40,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741895_1073 (size=5958) 2024-12-10T16:34:40,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741895_1073 (size=5958) 2024-12-10T16:34:40,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741895_1073 (size=5958) 2024-12-10T16:34:40,149 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): 
Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/.tmp/a/a6be1f0bcc69471682b9571a98cc4081 2024-12-10T16:34:40,167 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/.tmp/b/b860cacc1a044bfb8f4b137ae47504e9 is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733848480095/Put/seqid=0 2024-12-10T16:34:40,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741896_1074 (size=5958) 2024-12-10T16:34:40,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741896_1074 (size=5958) 2024-12-10T16:34:40,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741896_1074 (size=5958) 2024-12-10T16:34:40,178 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/.tmp/b/b860cacc1a044bfb8f4b137ae47504e9 2024-12-10T16:34:40,196 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/.tmp/c/ff99e5afbc674bf084852bbf2725e20d is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1733848480105/Put/seqid=0 2024-12-10T16:34:40,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741897_1075 (size=5958) 2024-12-10T16:34:40,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741897_1075 (size=5958) 2024-12-10T16:34:40,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741897_1075 (size=5958) 2024-12-10T16:34:40,203 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/.tmp/c/ff99e5afbc674bf084852bbf2725e20d 2024-12-10T16:34:40,209 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/.tmp/a/a6be1f0bcc69471682b9571a98cc4081 as hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/a/a6be1f0bcc69471682b9571a98cc4081 2024-12-10T16:34:40,215 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/a/a6be1f0bcc69471682b9571a98cc4081, entries=10, sequenceid=33, filesize=5.8 K 2024-12-10T16:34:40,216 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/.tmp/b/b860cacc1a044bfb8f4b137ae47504e9 as 
hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/b/b860cacc1a044bfb8f4b137ae47504e9 2024-12-10T16:34:40,222 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/b/b860cacc1a044bfb8f4b137ae47504e9, entries=10, sequenceid=33, filesize=5.8 K 2024-12-10T16:34:40,224 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/.tmp/c/ff99e5afbc674bf084852bbf2725e20d as hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/c/ff99e5afbc674bf084852bbf2725e20d 2024-12-10T16:34:40,230 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/c/ff99e5afbc674bf084852bbf2725e20d, entries=10, sequenceid=33, filesize=5.8 K 2024-12-10T16:34:40,231 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 049250c0d005b55396dbe65296760c72 in 113ms, sequenceid=33, compaction requested=false 2024-12-10T16:34:40,231 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 049250c0d005b55396dbe65296760c72: 2024-12-10T16:34:40,231 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 049250c0d005b55396dbe65296760c72, disabling compactions & flushes 2024-12-10T16:34:40,231 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733848479995.049250c0d005b55396dbe65296760c72. 2024-12-10T16:34:40,231 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733848479995.049250c0d005b55396dbe65296760c72. 2024-12-10T16:34:40,231 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733848479995.049250c0d005b55396dbe65296760c72. after waiting 0 ms 2024-12-10T16:34:40,231 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733848479995.049250c0d005b55396dbe65296760c72. 2024-12-10T16:34:40,232 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733848479995.049250c0d005b55396dbe65296760c72. 
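The flush and split-policy figures in this run are internally consistent: the ~2.55 KB/2610-byte flush splits evenly into the three 870 B per-family flushes at sequenceid=33, the FlushLargeStoresPolicy value flushSizeLowerBound=44739242 is a 128 MiB memstore flush size divided by the region's three column families (the "42.7 M" printed by the policy), and each desiredMaxFileSize reported by ConstantSizeRegionSplitPolicy matches a 64 MiB base scaled by (1 + jitterRate). The Java sketch below only replays that arithmetic; the 128 MiB and 64 MiB bases are assumptions inferred from the logged numbers, not constants read from HBase source.

// Back-of-the-envelope check of the numbers logged above. The 128 MiB memstore
// flush size and the 64 MiB split-size base are assumptions inferred from the
// logged values, not constants taken from HBase source.
public class WalReplayLogArithmetic {
    public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024;   // assumed 128 MiB
        int columnFamilies = 3;                        // families a, b and c

        // FlushLargeStoresPolicy: flushSizeLowerBound=44739242, logged as "42.7 M"
        long lowerBound = memstoreFlushSize / columnFamilies;
        System.out.printf("flushSizeLowerBound = %d (%.1f MiB)%n",
                lowerBound, lowerBound / 1024.0 / 1024.0);

        // Flush totals: dataSize ~2.55 KB/2610 bytes across three 870 B flushes
        System.out.println("2610 / 3 = " + (2610 / columnFamilies) + " B per family");

        // ConstantSizeRegionSplitPolicy: desiredMaxFileSize = base * (1 + jitterRate)
        long base = 64L * 1024 * 1024;                 // assumed 64 MiB base
        double[] jitterRates = { 0.028392985463142395,  // -> 69014285
                                 -0.11705055832862854,  // -> 59253734
                                 0.01731070876121521 }; // -> 68270566
        for (double jitter : jitterRates) {
            System.out.printf("jitterRate=%+.6f -> desiredMaxFileSize=%d%n",
                    jitter, Math.round(base * (1 + jitter)));
        }
    }
}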
2024-12-10T16:34:40,233 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 049250c0d005b55396dbe65296760c72: Waiting for close lock at 1733848480231Disabling compacts and flushes for region at 1733848480231Disabling writes for close at 1733848480231Writing region close event to WAL at 1733848480232 (+1 ms)Closed at 1733848480232 2024-12-10T16:34:40,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741894_1072 (size=3385) 2024-12-10T16:34:40,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741894_1072 (size=3385) 2024-12-10T16:34:40,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741894_1072 (size=3385) 2024-12-10T16:34:40,241 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/b/b860cacc1a044bfb8f4b137ae47504e9 to hdfs://localhost:35477/hbase/archive/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/b/b860cacc1a044bfb8f4b137ae47504e9 2024-12-10T16:34:40,260 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:35477/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994/wal.1733848480048, size=3.3 K (3385bytes) 2024-12-10T16:34:40,260 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35477/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994/wal.1733848480048 2024-12-10T16:34:40,260 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35477/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994/wal.1733848480048 after 0ms 2024-12-10T16:34:40,262 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994/wal.1733848480048: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:40,263 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:35477/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994/wal.1733848480048 took 3ms 2024-12-10T16:34:40,265 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:35477/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994/wal.1733848480048 so closing down 2024-12-10T16:34:40,266 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T16:34:40,267 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733848480048.temp 2024-12-10T16:34:40,268 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/recovered.edits/0000000000000000003-wal.1733848480048.temp 2024-12-10T16:34:40,269 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T16:34:40,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43913 is added to blk_1073741898_1076 (size=2944) 2024-12-10T16:34:40,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741898_1076 (size=2944) 2024-12-10T16:34:40,278 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/recovered.edits/0000000000000000003-wal.1733848480048.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-12-10T16:34:40,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741898_1076 (size=2944) 2024-12-10T16:34:40,280 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/recovered.edits/0000000000000000003-wal.1733848480048.temp to hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/recovered.edits/0000000000000000032 2024-12-10T16:34:40,280 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 17 ms; skipped=2; WAL=hdfs://localhost:35477/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994/wal.1733848480048, size=3.3 K, length=3385, corrupted=false, cancelled=false 2024-12-10T16:34:40,280 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:35477/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994/wal.1733848480048, journal: Splitting hdfs://localhost:35477/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994/wal.1733848480048, size=3.3 K (3385bytes) at 1733848480260Finishing writing output for hdfs://localhost:35477/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994/wal.1733848480048 so closing down at 1733848480266 (+6 ms)Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/recovered.edits/0000000000000000003-wal.1733848480048.temp at 1733848480268 (+2 ms)3 split writer threads finished at 1733848480269 (+1 ms)Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/recovered.edits/0000000000000000003-wal.1733848480048.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1733848480278 (+9 ms)Rename recovered edits hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/recovered.edits/0000000000000000003-wal.1733848480048.temp to hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/recovered.edits/0000000000000000032 at 1733848480280 (+2 ms)Processed 32 edits across 1 Regions in 17 ms; skipped=2; WAL=hdfs://localhost:35477/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994/wal.1733848480048, size=3.3 K, length=3385, corrupted=false, cancelled=false at 1733848480280 2024-12-10T16:34:40,282 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:35477/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994/wal.1733848480048 to hdfs://localhost:35477/hbase/oldWALs/wal.1733848480048 2024-12-10T16:34:40,283 INFO [Time-limited test {}] 
wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/recovered.edits/0000000000000000032 2024-12-10T16:34:40,283 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T16:34:40,285 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:40,296 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994/wal.1733848480285, exclude list is [], retry=0 2024-12-10T16:34:40,298 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:40,299 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:40,299 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:40,300 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994/wal.1733848480285 2024-12-10T16:34:40,300 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:36795:36795)] 2024-12-10T16:34:40,300 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 049250c0d005b55396dbe65296760c72, NAME => 'testReplayEditsWrittenViaHRegion,,1733848479995.049250c0d005b55396dbe65296760c72.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:40,300 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733848479995.049250c0d005b55396dbe65296760c72.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:40,301 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,301 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,302 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,303 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 049250c0d005b55396dbe65296760c72 columnFamilyName a 2024-12-10T16:34:40,303 DEBUG [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:40,307 DEBUG [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/a/a6be1f0bcc69471682b9571a98cc4081 2024-12-10T16:34:40,307 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] regionserver.HStore(327): Store=049250c0d005b55396dbe65296760c72/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:40,307 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,309 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 049250c0d005b55396dbe65296760c72 columnFamilyName b 2024-12-10T16:34:40,309 DEBUG [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:40,309 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] regionserver.HStore(327): Store=049250c0d005b55396dbe65296760c72/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:40,309 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,310 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 049250c0d005b55396dbe65296760c72 columnFamilyName c 2024-12-10T16:34:40,310 DEBUG [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:40,316 DEBUG [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/c/ff99e5afbc674bf084852bbf2725e20d 2024-12-10T16:34:40,316 INFO [StoreOpener-049250c0d005b55396dbe65296760c72-1 {}] regionserver.HStore(327): Store=049250c0d005b55396dbe65296760c72/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:40,316 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,317 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,319 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,319 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/recovered.edits/0000000000000000032 2024-12-10T16:34:40,322 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/recovered.edits/0000000000000000032: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:40,323 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 20, firstSequenceIdInLog=3, maxSequenceIdInLog=32, path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/recovered.edits/0000000000000000032 2024-12-10T16:34:40,323 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 049250c0d005b55396dbe65296760c72 3/3 column families, dataSize=870 B heapSize=2.31 KB 2024-12-10T16:34:40,337 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/.tmp/b/1a770ca3f021433f9bbe4357b847471b is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733848480095/Put/seqid=0 2024-12-10T16:34:40,343 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741900_1078 (size=5958) 2024-12-10T16:34:40,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741900_1078 (size=5958) 2024-12-10T16:34:40,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741900_1078 (size=5958) 2024-12-10T16:34:40,344 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=32 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/.tmp/b/1a770ca3f021433f9bbe4357b847471b 2024-12-10T16:34:40,350 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/.tmp/b/1a770ca3f021433f9bbe4357b847471b as hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/b/1a770ca3f021433f9bbe4357b847471b 2024-12-10T16:34:40,356 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/b/1a770ca3f021433f9bbe4357b847471b, entries=10, sequenceid=32, filesize=5.8 K 2024-12-10T16:34:40,357 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 049250c0d005b55396dbe65296760c72 in 34ms, sequenceid=32, compaction requested=false; wal=null 2024-12-10T16:34:40,358 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/recovered.edits/0000000000000000032 2024-12-10T16:34:40,359 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,359 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,359 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
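The "Applied 10, skipped 20" outcome above follows from what was already on disk when the recovered edits were replayed: families a and c still had their sequenceid-33 flush files, while the b store file had been archived to simulate a partial flush, so only the b edits from recovered.edits/0000000000000000032 (sequence ids 3 through 32, ten edits per family) needed to be re-applied. The split-file name itself appears to be just the highest sequence id in the split output, zero-padded, which is why the temp file 0000000000000000003-wal.1733848480048.temp was renamed to 0000000000000000032. The sketch below illustrates that skip rule; it is not HBase's implementation, shouldApply is a hypothetical helper, and the per-family sequence-id ranges are an assumption based on the a/b/c put timestamps seen earlier in the log.

import java.util.Map;

// Illustrative sketch, not HBase's implementation: an edit from recovered.edits
// is re-applied only if its sequence id is newer than what the target store has
// already persisted. The maxFlushedSeqId values mirror this test: 'a' and 'c'
// kept their sequenceid-33 flush files, 'b' had its file archived (nothing on
// disk, modelled here as -1).
public class RecoveredEditsReplaySketch {

    // Hypothetical helper, not an HBase API.
    static boolean shouldApply(long editSeqId, long maxFlushedSeqId) {
        return editSeqId > maxFlushedSeqId;
    }

    public static void main(String[] args) {
        Map<String, Long> maxFlushedSeqId = Map.of("a", 33L, "b", -1L, "c", 33L);
        int applied = 0, skipped = 0;
        // recovered.edits/0000000000000000032 holds edits with sequence ids 3..32,
        // ten per family, assumed written in the order a, b, c.
        for (long seqId = 3; seqId <= 32; seqId++) {
            String family = switch ((int) (seqId - 3) / 10) {
                case 0  -> "a";
                case 1  -> "b";
                default -> "c";
            };
            if (shouldApply(seqId, maxFlushedSeqId.get(family))) {
                applied++;
            } else {
                skipped++;
            }
        }
        // Matches the "Applied 10, skipped 20" line in this log.
        System.out.println("Applied " + applied + ", skipped " + skipped);
    }
}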
2024-12-10T16:34:40,360 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 049250c0d005b55396dbe65296760c72 2024-12-10T16:34:40,363 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/049250c0d005b55396dbe65296760c72/recovered.edits/33.seqid, newMaxSeqId=33, maxSeqId=1 2024-12-10T16:34:40,364 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 049250c0d005b55396dbe65296760c72; next sequenceid=34; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68270566, jitterRate=0.01731070876121521}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T16:34:40,364 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 049250c0d005b55396dbe65296760c72: Writing region info on filesystem at 1733848480301Initializing all the Stores at 1733848480302 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848480302Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848480302Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848480302Obtaining lock to block concurrent updates at 1733848480323 (+21 ms)Preparing flush snapshotting stores in 049250c0d005b55396dbe65296760c72 at 1733848480323Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733848479995.049250c0d005b55396dbe65296760c72., syncing WAL and waiting on mvcc, flushsize=dataSize=870, getHeapSize=2320, getOffHeapSize=0, getCellsCount=10 at 1733848480323Flushing stores of testReplayEditsWrittenViaHRegion,,1733848479995.049250c0d005b55396dbe65296760c72. 
at 1733848480323Flushing 049250c0d005b55396dbe65296760c72/b: creating writer at 1733848480324 (+1 ms)Flushing 049250c0d005b55396dbe65296760c72/b: appending metadata at 1733848480336 (+12 ms)Flushing 049250c0d005b55396dbe65296760c72/b: closing flushed file at 1733848480336Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3de00102: reopening flushed file at 1733848480349 (+13 ms)Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 049250c0d005b55396dbe65296760c72 in 34ms, sequenceid=32, compaction requested=false; wal=null at 1733848480357 (+8 ms)Cleaning up temporary data from old regions at 1733848480359 (+2 ms)Region opened successfully at 1733848480364 (+5 ms) 2024-12-10T16:34:40,384 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterPartialFlush Thread=425 (was 415) Potentially hanging thread: PacketResponder: BP-1758511473-172.17.0.3-1733848457790:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:34558 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:45638 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1758511473-172.17.0.3-1733848457790:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1758511473-172.17.0.3-1733848457790:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:59994 [Waiting for operation #31] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:34656 [Waiting for operation #31] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:55852 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:55020 [Waiting for operation #40] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1129 (was 1053) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=178 (was 178), ProcessCount=11 (was 11), AvailableMemoryMB=4851 (was 4859) 2024-12-10T16:34:40,384 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1129 is superior to 1024 2024-12-10T16:34:40,395 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterAbortingFlush Thread=425, OpenFileDescriptor=1129, MaxFileDescriptor=1048576, SystemLoadAverage=178, ProcessCount=11, AvailableMemoryMB=4850 2024-12-10T16:34:40,395 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1129 is superior to 1024 2024-12-10T16:34:40,409 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:40,411 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:40,412 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T16:34:40,414 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-85730383, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/hregion-85730383, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:40,427 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-85730383/hregion-85730383.1733848480415, exclude list is [], retry=0 2024-12-10T16:34:40,430 DEBUG [AsyncFSWAL-22-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:40,430 DEBUG [AsyncFSWAL-22-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:40,430 DEBUG [AsyncFSWAL-22-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:40,435 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-85730383/hregion-85730383.1733848480415 2024-12-10T16:34:40,435 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:44417:44417)] 2024-12-10T16:34:40,435 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 14f8f2d22391093edee0f092dab3e199, NAME => 'testReplayEditsAfterAbortingFlush,,1733848480409.14f8f2d22391093edee0f092dab3e199.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterAbortingFlush', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35477/hbase 2024-12-10T16:34:40,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741902_1080 (size=68) 2024-12-10T16:34:40,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741902_1080 (size=68) 2024-12-10T16:34:40,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741902_1080 (size=68) 2024-12-10T16:34:40,448 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733848480409.14f8f2d22391093edee0f092dab3e199.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:40,449 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,451 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 14f8f2d22391093edee0f092dab3e199 columnFamilyName a 2024-12-10T16:34:40,451 DEBUG [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:40,451 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] regionserver.HStore(327): Store=14f8f2d22391093edee0f092dab3e199/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:40,451 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,453 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 14f8f2d22391093edee0f092dab3e199 columnFamilyName b 2024-12-10T16:34:40,453 DEBUG [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:40,453 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] regionserver.HStore(327): Store=14f8f2d22391093edee0f092dab3e199/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:40,454 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,455 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 14f8f2d22391093edee0f092dab3e199 columnFamilyName c 2024-12-10T16:34:40,455 DEBUG [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:40,456 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] regionserver.HStore(327): Store=14f8f2d22391093edee0f092dab3e199/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:40,456 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,456 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,457 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,457 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,458 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,458 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T16:34:40,459 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,461 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T16:34:40,462 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 14f8f2d22391093edee0f092dab3e199; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66270280, jitterRate=-0.012495875358581543}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T16:34:40,463 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 14f8f2d22391093edee0f092dab3e199: Writing region info on filesystem at 1733848480448Initializing all the Stores at 1733848480449 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848480449Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848480449Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848480449Cleaning up temporary data from old regions at 1733848480458 (+9 ms)Region opened successfully at 1733848480463 (+5 ms) 2024-12-10T16:34:40,463 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 14f8f2d22391093edee0f092dab3e199, disabling compactions & flushes 2024-12-10T16:34:40,463 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1733848480409.14f8f2d22391093edee0f092dab3e199. 2024-12-10T16:34:40,463 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1733848480409.14f8f2d22391093edee0f092dab3e199. 2024-12-10T16:34:40,463 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1733848480409.14f8f2d22391093edee0f092dab3e199. 
after waiting 0 ms 2024-12-10T16:34:40,463 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1733848480409.14f8f2d22391093edee0f092dab3e199. 2024-12-10T16:34:40,464 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1733848480409.14f8f2d22391093edee0f092dab3e199. 2024-12-10T16:34:40,464 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 14f8f2d22391093edee0f092dab3e199: Waiting for close lock at 1733848480463Disabling compacts and flushes for region at 1733848480463Disabling writes for close at 1733848480463Writing region close event to WAL at 1733848480464 (+1 ms)Closed at 1733848480464 2024-12-10T16:34:40,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741901_1079 (size=95) 2024-12-10T16:34:40,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741901_1079 (size=95) 2024-12-10T16:34:40,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741901_1079 (size=95) 2024-12-10T16:34:40,469 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T16:34:40,469 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-85730383:(num 1733848480415) 2024-12-10T16:34:40,469 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T16:34:40,472 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:40,487 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408/wal.1733848480472, exclude list is [], retry=0 2024-12-10T16:34:40,490 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:40,491 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:40,491 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:40,492 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408/wal.1733848480472 2024-12-10T16:34:40,493 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:42633:42633)] 2024-12-10T16:34:40,544 DEBUG [Time-limited test {}] 
regionserver.HRegion(7752): Opening region: {ENCODED => 14f8f2d22391093edee0f092dab3e199, NAME => 'testReplayEditsAfterAbortingFlush,,1733848480409.14f8f2d22391093edee0f092dab3e199.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:40,546 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,546 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733848480409.14f8f2d22391093edee0f092dab3e199.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:40,546 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,546 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,548 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,549 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 14f8f2d22391093edee0f092dab3e199 columnFamilyName a 2024-12-10T16:34:40,550 DEBUG [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:40,550 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] regionserver.HStore(327): Store=14f8f2d22391093edee0f092dab3e199/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:40,550 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,551 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 14f8f2d22391093edee0f092dab3e199 columnFamilyName b 2024-12-10T16:34:40,551 DEBUG [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:40,552 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] regionserver.HStore(327): Store=14f8f2d22391093edee0f092dab3e199/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:40,552 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,553 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 14f8f2d22391093edee0f092dab3e199 columnFamilyName c 2024-12-10T16:34:40,553 DEBUG [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:40,553 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] regionserver.HStore(327): Store=14f8f2d22391093edee0f092dab3e199/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:40,554 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,554 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,556 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,557 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,557 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,557 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T16:34:40,559 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,559 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 14f8f2d22391093edee0f092dab3e199; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59495613, jitterRate=-0.11344628036022186}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T16:34:40,560 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:40,560 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 14f8f2d22391093edee0f092dab3e199: Running coprocessor pre-open hook at 1733848480546Writing region info on filesystem at 1733848480546Initializing all the Stores at 1733848480548 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848480548Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848480548Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848480548Cleaning up temporary data from old regions at 1733848480557 (+9 ms)Running coprocessor post-open hooks at 1733848480560 (+3 ms)Region opened successfully at 1733848480560 2024-12-10T16:34:40,575 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 14f8f2d22391093edee0f092dab3e199 3/3 column families, dataSize=590 B heapSize=2.08 KB 2024-12-10T16:34:40,576 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 14f8f2d22391093edee0f092dab3e199/a, retrying num=0 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
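[Editor's note] The WARN entries above ("Failed flushing store file ... retrying num=0" with "java.io.IOException: Simulated exception by tests" thrown from AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot) show the test injecting a flush failure and the store retrying the flush roughly once per second. Purely as an illustration of that fault-injection-plus-bounded-retry pattern, and not the actual HBase or test code, here is a minimal self-contained Java sketch; every name in it (FaultInjectingFlusher, flushWithRetries, the pause and retry counts) is made up for this example.

```java
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

public class FlushRetryDemo {

    /** Stand-in for a store flusher; throws while fault injection is switched on. */
    static class FaultInjectingFlusher {
        final AtomicBoolean throwWhenFlushing = new AtomicBoolean(true);

        void flushSnapshot() throws IOException {
            if (throwWhenFlushing.get()) {
                throw new IOException("Simulated exception by tests");
            }
            // real flush work would happen here once the flag is cleared
        }
    }

    /** Retries the flush up to maxRetries extra times, pausing between attempts. */
    static boolean flushWithRetries(FaultInjectingFlusher flusher, int maxRetries, long pauseMillis)
            throws InterruptedException {
        for (int attempt = 0; attempt <= maxRetries; attempt++) {
            try {
                flusher.flushSnapshot();
                return true; // flush succeeded
            } catch (IOException e) {
                // mirrors the "Failed flushing store file ..., retrying num=N" warnings in the log
                System.err.println("Failed flushing store file, retrying num=" + attempt + ": " + e.getMessage());
                Thread.sleep(pauseMillis);
            }
        }
        return false; // gave up; the caller would then abort the flush, as this test expects
    }

    public static void main(String[] args) throws InterruptedException {
        FaultInjectingFlusher flusher = new FaultInjectingFlusher();
        // 7 attempts (num=0..6), loosely matching the retry numbers visible in the log
        boolean ok = flushWithRetries(flusher, 6, 100);
        System.out.println("flush succeeded? " + ok);
    }
}
```

In the sketch the injected failure never clears, so the flush is ultimately abandoned after the last retry; that aborted flush is the precondition the surrounding test log is setting up before it replays the WAL edits.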
2024-12-10T16:34:41,478 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion 2024-12-10T16:34:41,478 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion Metrics about Tables on a single HBase RegionServer 2024-12-10T16:34:41,479 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum 2024-12-10T16:34:41,480 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum Metrics about Tables on a single HBase RegionServer 2024-12-10T16:34:41,481 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF 2024-12-10T16:34:41,481 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF Metrics about Tables on a single HBase RegionServer 2024-12-10T16:34:41,482 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly 2024-12-10T16:34:41,482 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly Metrics about Tables on a single HBase RegionServer 2024-12-10T16:34:41,483 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush 2024-12-10T16:34:41,483 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush Metrics about Tables on a single HBase RegionServer 2024-12-10T16:34:41,577 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 14f8f2d22391093edee0f092dab3e199/a, retrying num=1 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T16:34:42,480 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T16:34:42,578 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 14f8f2d22391093edee0f092dab3e199/a, retrying num=2 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T16:34:43,580 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 14f8f2d22391093edee0f092dab3e199/a, retrying num=3 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T16:34:44,582 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 14f8f2d22391093edee0f092dab3e199/a, retrying num=4 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T16:34:45,583 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 14f8f2d22391093edee0f092dab3e199/a, retrying num=5 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T16:34:46,584 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 14f8f2d22391093edee0f092dab3e199/a, retrying num=6 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T16:34:47,586 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 14f8f2d22391093edee0f092dab3e199/a, retrying num=7 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T16:34:48,587 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 14f8f2d22391093edee0f092dab3e199/a, retrying num=8 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T16:34:49,588 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 14f8f2d22391093edee0f092dab3e199/a, retrying num=9 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T16:34:49,593 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 14f8f2d22391093edee0f092dab3e199: 2024-12-10T16:34:49,593 INFO [Time-limited test {}] wal.AbstractTestWALReplay(671): Expected simulated exception when flushing region, region: testReplayEditsAfterAbortingFlush,,1733848480409.14f8f2d22391093edee0f092dab3e199. 2024-12-10T16:34:49,611 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 14f8f2d22391093edee0f092dab3e199: 2024-12-10T16:34:49,611 INFO [Time-limited test {}] wal.AbstractTestWALReplay(691): Expected exception when flushing region because server is stopped,Aborting flush because server is aborted... 
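The repeated WARN entries above all originate from the same place: the test's CustomStoreFlusher throws java.io.IOException("Simulated exception by tests") from flushSnapshot, and HStore retries the store-file flush roughly once per second (retrying num=3 through num=9 here) until it gives up, after which the test logs the expected failure and proceeds to close the region with its memstore still unflushed. The sketch below illustrates that bounded retry-with-delay pattern in isolation; it is a hypothetical, self-contained example, not HBase's HStore/StoreFlusher code, and the names flushWithRetries and maxAttempts are invented for illustration.

```java
import java.io.IOException;
import java.util.concurrent.TimeUnit;

// Hypothetical illustration of the bounded retry-with-delay pattern visible in the
// log above (one WARN per attempt, roughly 1 s apart, until the attempt limit is hit).
// This is NOT HBase's HStore code; the names and the retry limit are assumptions.
public class FlushRetryExample {

    /** Stand-in for the flusher that always fails, like CustomStoreFlusher in the test. */
    static void flushSnapshot() throws IOException {
        throw new IOException("Simulated exception by tests");
    }

    /**
     * Tries the flush up to maxAttempts times, logging a warning and sleeping
     * between attempts, then rethrows the last failure.
     */
    static void flushWithRetries(int maxAttempts) throws IOException, InterruptedException {
        IOException lastFailure = null;
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            try {
                flushSnapshot();
                return; // success: no retry needed
            } catch (IOException e) {
                lastFailure = e;
                System.out.println("Failed flushing store file, retrying num=" + attempt);
                TimeUnit.SECONDS.sleep(1); // roughly matches the 1 s spacing in the log
            }
        }
        throw lastFailure;
    }

    public static void main(String[] args) throws Exception {
        try {
            flushWithRetries(10);
        } catch (IOException expected) {
            System.out.println("Expected simulated exception when flushing region: "
                + expected.getMessage());
        }
    }
}
```

In the log, the exhausted retries surface as the "Expected simulated exception when flushing region" INFO just above, and the region is then closed with 1190 bytes of memstore data still unflushed, which is exactly what the WAL split and recovered-edits replay that follow go on to recover.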
2024-12-10T16:34:49,611 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 14f8f2d22391093edee0f092dab3e199, disabling compactions & flushes 2024-12-10T16:34:49,611 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1733848480409.14f8f2d22391093edee0f092dab3e199. 2024-12-10T16:34:49,611 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1733848480409.14f8f2d22391093edee0f092dab3e199. 2024-12-10T16:34:49,611 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1733848480409.14f8f2d22391093edee0f092dab3e199. after waiting 0 ms 2024-12-10T16:34:49,611 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1733848480409.14f8f2d22391093edee0f092dab3e199. 2024-12-10T16:34:49,612 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1190 in region testReplayEditsAfterAbortingFlush,,1733848480409.14f8f2d22391093edee0f092dab3e199. 2024-12-10T16:34:49,612 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1733848480409.14f8f2d22391093edee0f092dab3e199. 2024-12-10T16:34:49,612 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 14f8f2d22391093edee0f092dab3e199: Waiting for close lock at 1733848489611Running coprocessor pre-close hooks at 1733848489611Disabling compacts and flushes for region at 1733848489611Disabling writes for close at 1733848489611Writing region close event to WAL at 1733848489612 (+1 ms)Running coprocessor post-close hooks at 1733848489612Closed at 1733848489612 2024-12-10T16:34:49,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741903_1081 (size=2685) 2024-12-10T16:34:49,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741903_1081 (size=2685) 2024-12-10T16:34:49,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741903_1081 (size=2685) 2024-12-10T16:34:49,629 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:35477/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408/wal.1733848480472, size=2.6 K (2685bytes) 2024-12-10T16:34:49,629 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35477/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408/wal.1733848480472 2024-12-10T16:34:49,630 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35477/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408/wal.1733848480472 after 1ms 2024-12-10T16:34:49,632 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408/wal.1733848480472: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:49,632 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:35477/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408/wal.1733848480472 took 3ms 2024-12-10T16:34:49,635 DEBUG 
[Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:35477/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408/wal.1733848480472 so closing down 2024-12-10T16:34:49,635 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T16:34:49,636 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000004-wal.1733848480472.temp 2024-12-10T16:34:49,637 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/recovered.edits/0000000000000000004-wal.1733848480472.temp 2024-12-10T16:34:49,638 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T16:34:49,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741904_1082 (size=2094) 2024-12-10T16:34:49,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741904_1082 (size=2094) 2024-12-10T16:34:49,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741904_1082 (size=2094) 2024-12-10T16:34:49,646 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/recovered.edits/0000000000000000004-wal.1733848480472.temp (wrote 20 edits, skipped 0 edits in 0 ms) 2024-12-10T16:34:49,647 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/recovered.edits/0000000000000000004-wal.1733848480472.temp to hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/recovered.edits/0000000000000000026 2024-12-10T16:34:49,648 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 23 edits across 1 Regions in 15 ms; skipped=3; WAL=hdfs://localhost:35477/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408/wal.1733848480472, size=2.6 K, length=2685, corrupted=false, cancelled=false 2024-12-10T16:34:49,648 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:35477/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408/wal.1733848480472, journal: Splitting hdfs://localhost:35477/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408/wal.1733848480472, size=2.6 K (2685bytes) at 1733848489629Finishing writing output for hdfs://localhost:35477/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408/wal.1733848480472 so closing down at 1733848489635 (+6 ms)Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/recovered.edits/0000000000000000004-wal.1733848480472.temp at 1733848489637 (+2 ms)3 split writer threads finished at 1733848489638 (+1 ms)Closed recovered edits writer 
path=hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/recovered.edits/0000000000000000004-wal.1733848480472.temp (wrote 20 edits, skipped 0 edits in 0 ms) at 1733848489646 (+8 ms)Rename recovered edits hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/recovered.edits/0000000000000000004-wal.1733848480472.temp to hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/recovered.edits/0000000000000000026 at 1733848489647 (+1 ms)Processed 23 edits across 1 Regions in 15 ms; skipped=3; WAL=hdfs://localhost:35477/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408/wal.1733848480472, size=2.6 K, length=2685, corrupted=false, cancelled=false at 1733848489648 (+1 ms) 2024-12-10T16:34:49,649 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:35477/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408/wal.1733848480472 to hdfs://localhost:35477/hbase/oldWALs/wal.1733848480472 2024-12-10T16:34:49,650 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/recovered.edits/0000000000000000026 2024-12-10T16:34:49,650 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T16:34:49,651 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:49,665 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408/wal.1733848489652, exclude list is [], retry=0 2024-12-10T16:34:49,667 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:49,668 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:49,668 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:49,669 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408/wal.1733848489652 2024-12-10T16:34:49,669 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:44417:44417)] 2024-12-10T16:34:49,670 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 14f8f2d22391093edee0f092dab3e199, NAME => 'testReplayEditsAfterAbortingFlush,,1733848480409.14f8f2d22391093edee0f092dab3e199.', 
STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:49,670 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:49,670 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1733848480409.14f8f2d22391093edee0f092dab3e199.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:49,670 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:49,670 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:49,672 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:49,673 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 14f8f2d22391093edee0f092dab3e199 columnFamilyName a 2024-12-10T16:34:49,673 DEBUG [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:49,674 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] regionserver.HStore(327): Store=14f8f2d22391093edee0f092dab3e199/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:49,674 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:49,674 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 14f8f2d22391093edee0f092dab3e199 columnFamilyName b 2024-12-10T16:34:49,674 DEBUG [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:49,675 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] regionserver.HStore(327): Store=14f8f2d22391093edee0f092dab3e199/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:49,675 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:49,676 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 14f8f2d22391093edee0f092dab3e199 columnFamilyName c 2024-12-10T16:34:49,676 DEBUG [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:49,676 INFO [StoreOpener-14f8f2d22391093edee0f092dab3e199-1 {}] regionserver.HStore(327): Store=14f8f2d22391093edee0f092dab3e199/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:49,676 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:49,677 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:49,678 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:49,679 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/recovered.edits/0000000000000000026 2024-12-10T16:34:49,681 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/recovered.edits/0000000000000000026: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, 
valueCompressionType=GZ 2024-12-10T16:34:49,683 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 0, firstSequenceIdInLog=4, maxSequenceIdInLog=26, path=hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/recovered.edits/0000000000000000026 2024-12-10T16:34:49,683 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 14f8f2d22391093edee0f092dab3e199 3/3 column families, dataSize=1.16 KB heapSize=3.41 KB 2024-12-10T16:34:49,696 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/.tmp/a/e648c51d779448bfbd226ce87651e2ae is 64, key is testReplayEditsAfterAbortingFlush12/a:q/1733848489601/Put/seqid=0 2024-12-10T16:34:49,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741906_1084 (size=5523) 2024-12-10T16:34:49,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741906_1084 (size=5523) 2024-12-10T16:34:49,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741906_1084 (size=5523) 2024-12-10T16:34:49,703 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=416 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/.tmp/a/e648c51d779448bfbd226ce87651e2ae 2024-12-10T16:34:49,722 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/.tmp/b/218d425d2d35489899b689c0c4f4daba is 64, key is testReplayEditsAfterAbortingFlush10/b:q/1733848489595/Put/seqid=0 2024-12-10T16:34:49,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741907_1085 (size=5524) 2024-12-10T16:34:49,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741907_1085 (size=5524) 2024-12-10T16:34:49,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741907_1085 (size=5524) 2024-12-10T16:34:49,729 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=417 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/.tmp/b/218d425d2d35489899b689c0c4f4daba 2024-12-10T16:34:49,756 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/.tmp/c/3ae0b7ad00914250b49fd75da949722b is 64, key is testReplayEditsAfterAbortingFlush11/c:q/1733848489598/Put/seqid=0 2024-12-10T16:34:49,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741908_1086 (size=5457) 2024-12-10T16:34:49,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741908_1086 (size=5457) 
2024-12-10T16:34:49,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741908_1086 (size=5457) 2024-12-10T16:34:49,765 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=357 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/.tmp/c/3ae0b7ad00914250b49fd75da949722b 2024-12-10T16:34:49,771 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/.tmp/a/e648c51d779448bfbd226ce87651e2ae as hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/a/e648c51d779448bfbd226ce87651e2ae 2024-12-10T16:34:49,776 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/a/e648c51d779448bfbd226ce87651e2ae, entries=7, sequenceid=26, filesize=5.4 K 2024-12-10T16:34:49,778 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/.tmp/b/218d425d2d35489899b689c0c4f4daba as hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/b/218d425d2d35489899b689c0c4f4daba 2024-12-10T16:34:49,784 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/b/218d425d2d35489899b689c0c4f4daba, entries=7, sequenceid=26, filesize=5.4 K 2024-12-10T16:34:49,785 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/.tmp/c/3ae0b7ad00914250b49fd75da949722b as hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/c/3ae0b7ad00914250b49fd75da949722b 2024-12-10T16:34:49,791 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/c/3ae0b7ad00914250b49fd75da949722b, entries=6, sequenceid=26, filesize=5.3 K 2024-12-10T16:34:49,791 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for 14f8f2d22391093edee0f092dab3e199 in 108ms, sequenceid=26, compaction requested=false; wal=null 2024-12-10T16:34:49,792 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/recovered.edits/0000000000000000026 2024-12-10T16:34:49,793 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:49,793 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:49,794 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush 
descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T16:34:49,795 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:49,797 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/testReplayEditsAfterAbortingFlush/14f8f2d22391093edee0f092dab3e199/recovered.edits/26.seqid, newMaxSeqId=26, maxSeqId=1 2024-12-10T16:34:49,799 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 14f8f2d22391093edee0f092dab3e199; next sequenceid=27; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61238095, jitterRate=-0.08748127520084381}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T16:34:49,799 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 14f8f2d22391093edee0f092dab3e199 2024-12-10T16:34:49,799 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 14f8f2d22391093edee0f092dab3e199: Running coprocessor pre-open hook at 1733848489670Writing region info on filesystem at 1733848489670Initializing all the Stores at 1733848489671 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848489671Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848489671Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848489671Obtaining lock to block concurrent updates at 1733848489683 (+12 ms)Preparing flush snapshotting stores in 14f8f2d22391093edee0f092dab3e199 at 1733848489683Finished memstore snapshotting testReplayEditsAfterAbortingFlush,,1733848480409.14f8f2d22391093edee0f092dab3e199., syncing WAL and waiting on mvcc, flushsize=dataSize=1190, getHeapSize=3440, getOffHeapSize=0, getCellsCount=20 at 1733848489683Flushing stores of testReplayEditsAfterAbortingFlush,,1733848480409.14f8f2d22391093edee0f092dab3e199. 
at 1733848489683Flushing 14f8f2d22391093edee0f092dab3e199/a: creating writer at 1733848489683Flushing 14f8f2d22391093edee0f092dab3e199/a: appending metadata at 1733848489696 (+13 ms)Flushing 14f8f2d22391093edee0f092dab3e199/a: closing flushed file at 1733848489696Flushing 14f8f2d22391093edee0f092dab3e199/b: creating writer at 1733848489707 (+11 ms)Flushing 14f8f2d22391093edee0f092dab3e199/b: appending metadata at 1733848489721 (+14 ms)Flushing 14f8f2d22391093edee0f092dab3e199/b: closing flushed file at 1733848489721Flushing 14f8f2d22391093edee0f092dab3e199/c: creating writer at 1733848489735 (+14 ms)Flushing 14f8f2d22391093edee0f092dab3e199/c: appending metadata at 1733848489755 (+20 ms)Flushing 14f8f2d22391093edee0f092dab3e199/c: closing flushed file at 1733848489755Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@b4e4f39: reopening flushed file at 1733848489770 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c975e7: reopening flushed file at 1733848489777 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@282fc303: reopening flushed file at 1733848489784 (+7 ms)Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for 14f8f2d22391093edee0f092dab3e199 in 108ms, sequenceid=26, compaction requested=false; wal=null at 1733848489791 (+7 ms)Cleaning up temporary data from old regions at 1733848489793 (+2 ms)Running coprocessor post-open hooks at 1733848489799 (+6 ms)Region opened successfully at 1733848489799 2024-12-10T16:34:49,822 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterAbortingFlush Thread=421 (was 425), OpenFileDescriptor=1183 (was 1129) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=158 (was 178), ProcessCount=11 (was 11), AvailableMemoryMB=4846 (was 4850) 2024-12-10T16:34:49,822 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1183 is superior to 1024 2024-12-10T16:34:49,835 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testDatalossWhenInputError Thread=421, OpenFileDescriptor=1183, MaxFileDescriptor=1048576, SystemLoadAverage=158, ProcessCount=11, AvailableMemoryMB=4844 2024-12-10T16:34:49,835 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1183 is superior to 1024 2024-12-10T16:34:49,848 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:49,849 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:49,850 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T16:34:49,852 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-20741739, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/hregion-20741739, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:49,863 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-20741739/hregion-20741739.1733848489852, exclude list is [], retry=0 2024-12-10T16:34:49,866 DEBUG [AsyncFSWAL-24-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:49,866 DEBUG [AsyncFSWAL-24-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:49,867 DEBUG [AsyncFSWAL-24-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:49,868 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-20741739/hregion-20741739.1733848489852 2024-12-10T16:34:49,869 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:42633:42633)] 2024-12-10T16:34:49,869 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 3f909998a2c5c4cebcd2c32c0c1c0c36, NAME => 'testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testDatalossWhenInputError', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35477/hbase 2024-12-10T16:34:49,877 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741910_1088 (size=61) 2024-12-10T16:34:49,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741910_1088 (size=61) 2024-12-10T16:34:49,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741910_1088 (size=61) 2024-12-10T16:34:49,879 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:49,880 INFO [StoreOpener-3f909998a2c5c4cebcd2c32c0c1c0c36-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:49,881 INFO [StoreOpener-3f909998a2c5c4cebcd2c32c0c1c0c36-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f909998a2c5c4cebcd2c32c0c1c0c36 columnFamilyName a 2024-12-10T16:34:49,881 DEBUG [StoreOpener-3f909998a2c5c4cebcd2c32c0c1c0c36-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:49,882 INFO [StoreOpener-3f909998a2c5c4cebcd2c32c0c1c0c36-1 {}] regionserver.HStore(327): Store=3f909998a2c5c4cebcd2c32c0c1c0c36/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:49,882 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:49,883 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:49,883 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:49,884 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:49,884 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:49,886 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:49,888 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T16:34:49,888 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 3f909998a2c5c4cebcd2c32c0c1c0c36; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70827859, jitterRate=0.055417343974113464}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T16:34:49,889 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 3f909998a2c5c4cebcd2c32c0c1c0c36: Writing region info on filesystem at 1733848489879Initializing all the Stores at 1733848489880 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848489880Cleaning up temporary data from old regions at 1733848489884 (+4 ms)Region opened successfully at 1733848489889 (+5 ms) 2024-12-10T16:34:49,889 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 3f909998a2c5c4cebcd2c32c0c1c0c36, disabling compactions & flushes 2024-12-10T16:34:49,889 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36. 2024-12-10T16:34:49,889 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36. 2024-12-10T16:34:49,889 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36. after waiting 0 ms 2024-12-10T16:34:49,889 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36. 2024-12-10T16:34:49,890 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36. 
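The open line above prints the split policy state: SteppingSplitPolicy over IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70827859, jitterRate=0.055417343974113464}}. Those figures are consistent with a 64 MiB base file size scaled by (1 + jitterRate); the 64 MiB base is inferred from the arithmetic here, not quoted from the test's configuration. A minimal plain-Java check of that relationship:

public class SplitSizeJitterCheck {
  public static void main(String[] args) {
    long assumedBase = 64L * 1024 * 1024;         // 67108864; assumed base max file size
    double jitterRate = 0.055417343974113464;     // value printed in the log entry above
    long desired = Math.round(assumedBase * (1 + jitterRate));
    System.out.println(desired);                  // 70827859, the desiredMaxFileSize logged above
  }
}

The same check reproduces the other desiredMaxFileSize values printed later in this log (62486645 at jitterRate=-0.0688..., 72872803 at 0.0858..., 61140595 at -0.0889...), so the relationship appears consistent across the region opens, though the base size remains an inference.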
2024-12-10T16:34:49,890 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 3f909998a2c5c4cebcd2c32c0c1c0c36: Waiting for close lock at 1733848489889Disabling compacts and flushes for region at 1733848489889Disabling writes for close at 1733848489889Writing region close event to WAL at 1733848489890 (+1 ms)Closed at 1733848489890 2024-12-10T16:34:49,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741909_1087 (size=95) 2024-12-10T16:34:49,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741909_1087 (size=95) 2024-12-10T16:34:49,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741909_1087 (size=95) 2024-12-10T16:34:49,894 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T16:34:49,894 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-20741739:(num 1733848489852) 2024-12-10T16:34:49,894 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T16:34:49,896 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:49,908 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847/wal.1733848489896, exclude list is [], retry=0 2024-12-10T16:34:49,911 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:49,911 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:49,911 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:49,913 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847/wal.1733848489896 2024-12-10T16:34:49,913 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:36795:36795)] 2024-12-10T16:34:49,913 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 3f909998a2c5c4cebcd2c32c0c1c0c36, NAME => 'testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:49,913 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:49,913 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:49,913 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:49,915 INFO [StoreOpener-3f909998a2c5c4cebcd2c32c0c1c0c36-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:49,916 INFO [StoreOpener-3f909998a2c5c4cebcd2c32c0c1c0c36-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f909998a2c5c4cebcd2c32c0c1c0c36 columnFamilyName a 2024-12-10T16:34:49,916 DEBUG [StoreOpener-3f909998a2c5c4cebcd2c32c0c1c0c36-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:49,917 INFO [StoreOpener-3f909998a2c5c4cebcd2c32c0c1c0c36-1 {}] regionserver.HStore(327): Store=3f909998a2c5c4cebcd2c32c0c1c0c36/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:49,917 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:49,917 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:49,918 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:49,919 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:49,919 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:49,920 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:49,921 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 3f909998a2c5c4cebcd2c32c0c1c0c36; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62486645, jitterRate=-0.06887643039226532}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T16:34:49,921 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 
3f909998a2c5c4cebcd2c32c0c1c0c36: Writing region info on filesystem at 1733848489914Initializing all the Stores at 1733848489914Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848489914Cleaning up temporary data from old regions at 1733848489919 (+5 ms)Region opened successfully at 1733848489921 (+2 ms) 2024-12-10T16:34:49,931 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 3f909998a2c5c4cebcd2c32c0c1c0c36, disabling compactions & flushes 2024-12-10T16:34:49,931 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36. 2024-12-10T16:34:49,931 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36. 2024-12-10T16:34:49,931 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36. after waiting 0 ms 2024-12-10T16:34:49,931 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36. 2024-12-10T16:34:49,932 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 750 in region testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36. 2024-12-10T16:34:49,932 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36. 
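The ERROR entry above is the scenario this test sets up: the region is closed while 750 B of data are still in the memstore, so those edits exist only in wal.1733848489896 until the split and replay below recover them. Spread over the 10 edits that the split reports further down, that is 75 B of memstore-accounted data per edit, the same order as the 79-byte "biggest cell" the later flush reports. A trivial check of that arithmetic, with figures taken from the log rather than from HBase internals:

public class MemstoreDataAtRiskCheck {
  public static void main(String[] args) {
    long memstoreDataSizeBytes = 750;  // "Memstore data size is 750" in the close above
    int editsInWal = 10;               // "wrote 10 edits" / "Applied 10" in the split and replay below
    System.out.println(memstoreDataSizeBytes / editsInWal + " bytes per edit on average"); // 75
  }
}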
2024-12-10T16:34:49,932 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 3f909998a2c5c4cebcd2c32c0c1c0c36: Waiting for close lock at 1733848489931Disabling compacts and flushes for region at 1733848489931Disabling writes for close at 1733848489931Writing region close event to WAL at 1733848489932 (+1 ms)Closed at 1733848489932 2024-12-10T16:34:49,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741911_1089 (size=1050) 2024-12-10T16:34:49,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741911_1089 (size=1050) 2024-12-10T16:34:49,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741911_1089 (size=1050) 2024-12-10T16:34:49,948 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:35477/hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847/wal.1733848489896, size=1.0 K (1050bytes) 2024-12-10T16:34:49,948 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35477/hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847/wal.1733848489896 2024-12-10T16:34:49,949 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35477/hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847/wal.1733848489896 after 1ms 2024-12-10T16:34:49,951 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847/wal.1733848489896: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:49,951 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:35477/hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847/wal.1733848489896 took 3ms 2024-12-10T16:34:49,953 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:35477/hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847/wal.1733848489896 so closing down 2024-12-10T16:34:49,953 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T16:34:49,954 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733848489896.temp 2024-12-10T16:34:49,955 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/recovered.edits/0000000000000000003-wal.1733848489896.temp 2024-12-10T16:34:49,955 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T16:34:49,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741912_1090 (size=1050) 2024-12-10T16:34:49,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741912_1090 (size=1050) 2024-12-10T16:34:49,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741912_1090 (size=1050) 2024-12-10T16:34:49,965 INFO 
[split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/recovered.edits/0000000000000000003-wal.1733848489896.temp (wrote 10 edits, skipped 0 edits in 0 ms) 2024-12-10T16:34:49,967 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/recovered.edits/0000000000000000003-wal.1733848489896.temp to hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/recovered.edits/0000000000000000012 2024-12-10T16:34:49,967 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 10 edits across 1 Regions in 16 ms; skipped=0; WAL=hdfs://localhost:35477/hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847/wal.1733848489896, size=1.0 K, length=1050, corrupted=false, cancelled=false 2024-12-10T16:34:49,967 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:35477/hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847/wal.1733848489896, journal: Splitting hdfs://localhost:35477/hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847/wal.1733848489896, size=1.0 K (1050bytes) at 1733848489948Finishing writing output for hdfs://localhost:35477/hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847/wal.1733848489896 so closing down at 1733848489953 (+5 ms)Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/recovered.edits/0000000000000000003-wal.1733848489896.temp at 1733848489955 (+2 ms)3 split writer threads finished at 1733848489955Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/recovered.edits/0000000000000000003-wal.1733848489896.temp (wrote 10 edits, skipped 0 edits in 0 ms) at 1733848489965 (+10 ms)Rename recovered edits hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/recovered.edits/0000000000000000003-wal.1733848489896.temp to hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/recovered.edits/0000000000000000012 at 1733848489967 (+2 ms)Processed 10 edits across 1 Regions in 16 ms; skipped=0; WAL=hdfs://localhost:35477/hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847/wal.1733848489896, size=1.0 K, length=1050, corrupted=false, cancelled=false at 1733848489967 2024-12-10T16:34:49,968 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:35477/hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847/wal.1733848489896 to hdfs://localhost:35477/hbase/oldWALs/wal.1733848489896 2024-12-10T16:34:49,969 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/recovered.edits/0000000000000000012 2024-12-10T16:34:49,972 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/recovered.edits/0000000000000000012: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 
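The compression context lines above are why the test class is named TestAsyncWALReplayValueCompression: both the original WAL and the recovered-edits file it produced are read with hasTagCompression=true, hasValueCompression=true and valueCompressionType=GZ. The following is only an illustrative round-trip of a cell value through the GZIP codec in java.util.zip, to show the codec family named in the log; it is not HBase's actual WAL value-compression code path.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

public class GzValueRoundTrip {
  public static void main(String[] args) throws IOException {
    byte[] value = "some WAL cell value".getBytes(StandardCharsets.UTF_8); // hypothetical payload

    // Compress the value as a GZ codec would on the write path.
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (GZIPOutputStream gz = new GZIPOutputStream(compressed)) {
      gz.write(value);
    }

    // Decompress on the read/replay path and verify the round trip.
    ByteArrayOutputStream restored = new ByteArrayOutputStream();
    try (GZIPInputStream gz = new GZIPInputStream(new ByteArrayInputStream(compressed.toByteArray()))) {
      byte[] buf = new byte[1024];
      int n;
      while ((n = gz.read(buf)) != -1) {
        restored.write(buf, 0, n);
      }
    }
    System.out.println(Arrays.equals(value, restored.toByteArray())); // true
  }
}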
2024-12-10T16:34:50,315 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T16:34:50,317 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:50,328 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847/wal.1733848490317, exclude list is [], retry=0 2024-12-10T16:34:50,331 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:50,331 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:50,331 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:50,333 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847/wal.1733848490317 2024-12-10T16:34:50,333 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:42633:42633)] 2024-12-10T16:34:50,333 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 3f909998a2c5c4cebcd2c32c0c1c0c36, NAME => 'testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:50,333 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:50,333 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:50,333 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:50,336 INFO [StoreOpener-3f909998a2c5c4cebcd2c32c0c1c0c36-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:50,338 INFO [StoreOpener-3f909998a2c5c4cebcd2c32c0c1c0c36-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; 
tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f909998a2c5c4cebcd2c32c0c1c0c36 columnFamilyName a 2024-12-10T16:34:50,338 DEBUG [StoreOpener-3f909998a2c5c4cebcd2c32c0c1c0c36-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:50,339 INFO [StoreOpener-3f909998a2c5c4cebcd2c32c0c1c0c36-1 {}] regionserver.HStore(327): Store=3f909998a2c5c4cebcd2c32c0c1c0c36/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:50,339 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:50,339 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:50,341 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:50,342 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/recovered.edits/0000000000000000012 2024-12-10T16:34:50,345 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/recovered.edits/0000000000000000012: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:50,346 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 0, firstSequenceIdInLog=3, maxSequenceIdInLog=12, path=hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/recovered.edits/0000000000000000012 2024-12-10T16:34:50,347 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 3f909998a2c5c4cebcd2c32c0c1c0c36 1/1 column families, dataSize=750 B heapSize=1.73 KB 2024-12-10T16:34:50,355 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
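The replay entry above reports "Applied 10, skipped 0, firstSequenceIdInLog=3, maxSequenceIdInLog=12": every edit in the recovered-edits file is newer than anything the region had already persisted (its earlier seqid marker recorded newMaxSeqId=1), so nothing is skipped. A minimal plain-Java sketch of that filtering rule as suggested by the log, using simplified types rather than HBase's WAL classes:

import java.util.List;

public class ReplaySkipSketch {
  record Edit(long seqId) {}

  // Apply only edits that are newer than what the region has already durably persisted.
  static void replay(List<Edit> recoveredEdits, long maxPersistedSeqId) {
    long applied = 0;
    long skipped = 0;
    for (Edit e : recoveredEdits) {
      if (e.seqId() > maxPersistedSeqId) {
        applied++;           // would be re-inserted into the memstore here
      } else {
        skipped++;           // already covered by existing store files / the seqid marker
      }
    }
    System.out.println("Applied " + applied + ", skipped " + skipped);
  }

  public static void main(String[] args) {
    // Figures from the log above: edits 3..12 in the file, region previously at seqid 1.
    List<Edit> edits = java.util.stream.LongStream.rangeClosed(3, 12).mapToObj(Edit::new).toList();
    replay(edits, 1); // prints "Applied 10, skipped 0"
  }
}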
2024-12-10T16:34:50,371 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/.tmp/a/b55d9987b3f64a008687467f330f698e is 79, key is testDatalossWhenInputError/a:x0/1733848489921/Put/seqid=0 2024-12-10T16:34:50,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741914_1092 (size=5808) 2024-12-10T16:34:50,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741914_1092 (size=5808) 2024-12-10T16:34:50,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741914_1092 (size=5808) 2024-12-10T16:34:50,378 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=750 B at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/.tmp/a/b55d9987b3f64a008687467f330f698e 2024-12-10T16:34:50,391 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/.tmp/a/b55d9987b3f64a008687467f330f698e as hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/a/b55d9987b3f64a008687467f330f698e 2024-12-10T16:34:50,402 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/a/b55d9987b3f64a008687467f330f698e, entries=10, sequenceid=12, filesize=5.7 K 2024-12-10T16:34:50,403 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 3f909998a2c5c4cebcd2c32c0c1c0c36 in 56ms, sequenceid=12, compaction requested=false; wal=null 2024-12-10T16:34:50,403 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/recovered.edits/0000000000000000012 2024-12-10T16:34:50,405 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:50,405 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:50,408 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:50,410 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=1 2024-12-10T16:34:50,411 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 3f909998a2c5c4cebcd2c32c0c1c0c36; next sequenceid=13; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72872803, jitterRate=0.08588938415050507}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T16:34:50,412 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 3f909998a2c5c4cebcd2c32c0c1c0c36: Writing region info on filesystem at 
1733848490333Initializing all the Stores at 1733848490336 (+3 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848490336Obtaining lock to block concurrent updates at 1733848490347 (+11 ms)Preparing flush snapshotting stores in 3f909998a2c5c4cebcd2c32c0c1c0c36 at 1733848490347Finished memstore snapshotting testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36., syncing WAL and waiting on mvcc, flushsize=dataSize=750, getHeapSize=1760, getOffHeapSize=0, getCellsCount=10 at 1733848490347Flushing stores of testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36. at 1733848490347Flushing 3f909998a2c5c4cebcd2c32c0c1c0c36/a: creating writer at 1733848490347Flushing 3f909998a2c5c4cebcd2c32c0c1c0c36/a: appending metadata at 1733848490371 (+24 ms)Flushing 3f909998a2c5c4cebcd2c32c0c1c0c36/a: closing flushed file at 1733848490371Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20df89c1: reopening flushed file at 1733848490388 (+17 ms)Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 3f909998a2c5c4cebcd2c32c0c1c0c36 in 56ms, sequenceid=12, compaction requested=false; wal=null at 1733848490403 (+15 ms)Cleaning up temporary data from old regions at 1733848490405 (+2 ms)Region opened successfully at 1733848490412 (+7 ms) 2024-12-10T16:34:50,416 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 3f909998a2c5c4cebcd2c32c0c1c0c36, NAME => 'testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:50,416 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1733848489848.3f909998a2c5c4cebcd2c32c0c1c0c36.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:50,416 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:50,416 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:50,418 INFO [StoreOpener-3f909998a2c5c4cebcd2c32c0c1c0c36-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:50,419 INFO [StoreOpener-3f909998a2c5c4cebcd2c32c0c1c0c36-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f909998a2c5c4cebcd2c32c0c1c0c36 columnFamilyName a 2024-12-10T16:34:50,419 DEBUG [StoreOpener-3f909998a2c5c4cebcd2c32c0c1c0c36-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:50,425 DEBUG [StoreOpener-3f909998a2c5c4cebcd2c32c0c1c0c36-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/a/b55d9987b3f64a008687467f330f698e 2024-12-10T16:34:50,425 INFO [StoreOpener-3f909998a2c5c4cebcd2c32c0c1c0c36-1 {}] regionserver.HStore(327): Store=3f909998a2c5c4cebcd2c32c0c1c0c36/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:50,425 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:50,426 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:50,427 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:50,427 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:50,427 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:50,429 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 3f909998a2c5c4cebcd2c32c0c1c0c36 2024-12-10T16:34:50,431 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/testDatalossWhenInputError/3f909998a2c5c4cebcd2c32c0c1c0c36/recovered.edits/13.seqid, newMaxSeqId=13, maxSeqId=12 2024-12-10T16:34:50,432 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 3f909998a2c5c4cebcd2c32c0c1c0c36; next sequenceid=14; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61140595, jitterRate=-0.08893413841724396}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T16:34:50,432 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 3f909998a2c5c4cebcd2c32c0c1c0c36: Writing region info on filesystem at 1733848490416Initializing all the Stores at 1733848490417 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848490417Cleaning up temporary data from old regions at 1733848490427 (+10 ms)Region opened successfully at 1733848490432 (+5 ms) 2024-12-10T16:34:50,447 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testDatalossWhenInputError Thread=431 (was 421) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at 
/127.0.0.1:34626 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1758511473-172.17.0.3-1733848457790:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:56818 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:49330 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at 
/127.0.0.1:42082 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1758511473-172.17.0.3-1733848457790:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1758511473-172.17.0.3-1733848457790:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:55932 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:45684 [Waiting for operation #29] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1265 (was 1183) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=158 (was 158), ProcessCount=11 (was 11), AvailableMemoryMB=4837 (was 4844) 2024-12-10T16:34:50,448 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1265 is superior to 1024 2024-12-10T16:34:50,461 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testCompactedBulkLoadedFiles Thread=431, OpenFileDescriptor=1265, MaxFileDescriptor=1048576, SystemLoadAverage=158, ProcessCount=11, AvailableMemoryMB=4836 2024-12-10T16:34:50,461 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1265 is superior to 1024 2024-12-10T16:34:50,482 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:50,484 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:50,485 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T16:34:50,487 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-78696497, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/hregion-78696497, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:50,503 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-78696497/hregion-78696497.1733848490488, exclude list is [], retry=0 2024-12-10T16:34:50,506 DEBUG [AsyncFSWAL-26-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:50,506 DEBUG [AsyncFSWAL-26-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:50,507 DEBUG [AsyncFSWAL-26-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:50,508 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-78696497/hregion-78696497.1733848490488 2024-12-10T16:34:50,508 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with 
pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:44417:44417)] 2024-12-10T16:34:50,509 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 29aef6281497cde7be319b98d41a5734, NAME => 'testCompactedBulkLoadedFiles,,1733848490482.29aef6281497cde7be319b98d41a5734.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testCompactedBulkLoadedFiles', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35477/hbase 2024-12-10T16:34:50,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741916_1094 (size=63) 2024-12-10T16:34:50,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741916_1094 (size=63) 2024-12-10T16:34:50,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741916_1094 (size=63) 2024-12-10T16:34:50,517 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733848490482.29aef6281497cde7be319b98d41a5734.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:50,520 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:50,521 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 29aef6281497cde7be319b98d41a5734 columnFamilyName a 2024-12-10T16:34:50,521 DEBUG [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:50,521 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.HStore(327): Store=29aef6281497cde7be319b98d41a5734/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:50,522 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:50,523 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 29aef6281497cde7be319b98d41a5734 columnFamilyName b 2024-12-10T16:34:50,523 DEBUG [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:50,523 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.HStore(327): Store=29aef6281497cde7be319b98d41a5734/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:50,523 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:50,524 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 29aef6281497cde7be319b98d41a5734 columnFamilyName c 2024-12-10T16:34:50,524 DEBUG [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:50,525 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] 
regionserver.HStore(327): Store=29aef6281497cde7be319b98d41a5734/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:50,525 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:50,526 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:50,526 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:50,527 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:50,527 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:50,528 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T16:34:50,529 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:50,531 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T16:34:50,531 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 29aef6281497cde7be319b98d41a5734; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71075885, jitterRate=0.05911321938037872}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T16:34:50,531 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 29aef6281497cde7be319b98d41a5734: Writing region info on filesystem at 1733848490517Initializing all the Stores at 1733848490518 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848490518Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848490519 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848490519Cleaning up temporary data from old regions at 1733848490527 (+8 ms)Region opened successfully at 1733848490531 (+4 ms) 
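
The entries above walk through creating region 29aef6281497cde7be319b98d41a5734 for table testCompactedBulkLoadedFiles with three column families ('a', 'b', 'c': one version, ROW bloom filters, 64 KB blocks, no compression or encoding), opening it, and closing it again. For orientation only, here is a minimal sketch of an equivalent table descriptor built with the public HBase 2.x client API; it is not part of the test itself, and any attribute not shown is simply left at its default.

// Illustrative sketch, not part of the test log: a table descriptor equivalent to the one
// created above (families 'a', 'b', 'c'; VERSIONS=1, BLOOMFILTER=ROW, BLOCKSIZE=64KB,
// no compression or encoding), built with the public HBase 2.x client API.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  static TableDescriptor testTableDescriptor() {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("testCompactedBulkLoadedFiles"));
    for (String family : new String[] { "a", "b", "c" }) {
      ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)                  // VERSIONS => '1'
          .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
          .setBlocksize(64 * 1024)            // BLOCKSIZE => '65536 B (64KB)'
          .build();                           // compression/encoding left at the NONE defaults
      builder.setColumnFamily(cfd);
    }
    return builder.build();
  }
}
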
2024-12-10T16:34:50,532 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 29aef6281497cde7be319b98d41a5734, disabling compactions & flushes 2024-12-10T16:34:50,532 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1733848490482.29aef6281497cde7be319b98d41a5734. 2024-12-10T16:34:50,532 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1733848490482.29aef6281497cde7be319b98d41a5734. 2024-12-10T16:34:50,532 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1733848490482.29aef6281497cde7be319b98d41a5734. after waiting 0 ms 2024-12-10T16:34:50,532 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1733848490482.29aef6281497cde7be319b98d41a5734. 2024-12-10T16:34:50,532 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1733848490482.29aef6281497cde7be319b98d41a5734. 2024-12-10T16:34:50,532 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 29aef6281497cde7be319b98d41a5734: Waiting for close lock at 1733848490532Disabling compacts and flushes for region at 1733848490532Disabling writes for close at 1733848490532Writing region close event to WAL at 1733848490532Closed at 1733848490532 2024-12-10T16:34:50,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741915_1093 (size=95) 2024-12-10T16:34:50,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741915_1093 (size=95) 2024-12-10T16:34:50,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741915_1093 (size=95) 2024-12-10T16:34:50,540 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T16:34:50,540 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-78696497:(num 1733848490488) 2024-12-10T16:34:50,540 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T16:34:50,542 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:50,554 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848490542, exclude list is [], retry=0 2024-12-10T16:34:50,557 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:50,558 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:50,558 DEBUG [TestAsyncWALReplay-pool-0 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:50,560 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848490542 2024-12-10T16:34:50,560 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:44417:44417)] 2024-12-10T16:34:50,560 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 29aef6281497cde7be319b98d41a5734, NAME => 'testCompactedBulkLoadedFiles,,1733848490482.29aef6281497cde7be319b98d41a5734.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:50,560 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733848490482.29aef6281497cde7be319b98d41a5734.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:50,560 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:50,560 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:50,562 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:50,562 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 29aef6281497cde7be319b98d41a5734 columnFamilyName a 2024-12-10T16:34:50,563 DEBUG [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:50,563 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.HStore(327): Store=29aef6281497cde7be319b98d41a5734/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:50,563 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:50,564 INFO 
[StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 29aef6281497cde7be319b98d41a5734 columnFamilyName b 2024-12-10T16:34:50,564 DEBUG [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:50,564 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.HStore(327): Store=29aef6281497cde7be319b98d41a5734/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:50,564 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:50,565 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 29aef6281497cde7be319b98d41a5734 columnFamilyName c 2024-12-10T16:34:50,565 DEBUG [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:50,565 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.HStore(327): Store=29aef6281497cde7be319b98d41a5734/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:50,565 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:50,566 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:50,567 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:50,568 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:50,568 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:50,568 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T16:34:50,570 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:50,570 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 29aef6281497cde7be319b98d41a5734; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69067294, jitterRate=0.029182881116867065}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T16:34:50,571 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 29aef6281497cde7be319b98d41a5734: Writing region info on filesystem at 1733848490561Initializing all the Stores at 1733848490561Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848490561Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848490562 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848490562Cleaning up temporary data from old regions at 1733848490568 (+6 ms)Region opened successfully at 1733848490571 (+3 ms) 2024-12-10T16:34:50,575 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/testCompactedBulkLoadedFiles/hfile0 is 32, key is 000/a:a/1733848490575/Put/seqid=0 2024-12-10T16:34:50,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741918_1096 (size=4875) 2024-12-10T16:34:50,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741918_1096 (size=4875) 2024-12-10T16:34:50,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741918_1096 (size=4875) 2024-12-10T16:34:50,584 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/testCompactedBulkLoadedFiles/hfile1 is 32, key 
is 100/a:a/1733848490584/Put/seqid=0 2024-12-10T16:34:50,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741919_1097 (size=4875) 2024-12-10T16:34:50,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741919_1097 (size=4875) 2024-12-10T16:34:50,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741919_1097 (size=4875) 2024-12-10T16:34:50,595 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/testCompactedBulkLoadedFiles/hfile2 is 32, key is 200/a:a/1733848490594/Put/seqid=0 2024-12-10T16:34:50,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741920_1098 (size=4875) 2024-12-10T16:34:50,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741920_1098 (size=4875) 2024-12-10T16:34:50,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741920_1098 (size=4875) 2024-12-10T16:34:50,603 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:35477/hbase/testCompactedBulkLoadedFiles/hfile0 for inclusion in 29aef6281497cde7be319b98d41a5734/a 2024-12-10T16:34:50,606 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=000 last=050 2024-12-10T16:34:50,606 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-10T16:34:50,606 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:35477/hbase/testCompactedBulkLoadedFiles/hfile1 for inclusion in 29aef6281497cde7be319b98d41a5734/a 2024-12-10T16:34:50,609 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=100 last=150 2024-12-10T16:34:50,610 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-10T16:34:50,610 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:35477/hbase/testCompactedBulkLoadedFiles/hfile2 for inclusion in 29aef6281497cde7be319b98d41a5734/a 2024-12-10T16:34:50,613 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=200 last=250 2024-12-10T16:34:50,613 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-10T16:34:50,613 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 29aef6281497cde7be319b98d41a5734 3/3 column families, dataSize=51 B heapSize=896 B 2024-12-10T16:34:50,629 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/.tmp/a/e4e77bebf869499086ce61b296d93aa6 is 55, key is testCompactedBulkLoadedFiles/a:a/1733848490572/Put/seqid=0 2024-12-10T16:34:50,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741921_1099 (size=5107) 2024-12-10T16:34:50,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741921_1099 (size=5107) 2024-12-10T16:34:50,638 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741921_1099 (size=5107) 2024-12-10T16:34:50,640 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51 B at sequenceid=4 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/.tmp/a/e4e77bebf869499086ce61b296d93aa6 2024-12-10T16:34:50,646 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/.tmp/a/e4e77bebf869499086ce61b296d93aa6 as hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/e4e77bebf869499086ce61b296d93aa6 2024-12-10T16:34:50,651 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/e4e77bebf869499086ce61b296d93aa6, entries=1, sequenceid=4, filesize=5.0 K 2024-12-10T16:34:50,652 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~51 B/51, heapSize ~368 B/368, currentSize=0 B/0 for 29aef6281497cde7be319b98d41a5734 in 39ms, sequenceid=4, compaction requested=false 2024-12-10T16:34:50,652 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 29aef6281497cde7be319b98d41a5734: 2024-12-10T16:34:50,654 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/testCompactedBulkLoadedFiles/hfile0 as hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/4e57be73b315479f8ee001f98dbc7ccb_SeqId_4_ 2024-12-10T16:34:50,655 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/testCompactedBulkLoadedFiles/hfile1 as hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/1a5de332c518486692c8a97428037eeb_SeqId_4_ 2024-12-10T16:34:50,656 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/testCompactedBulkLoadedFiles/hfile2 as hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/bb61ad15012c4d13b679f8d561620aef_SeqId_4_ 2024-12-10T16:34:50,656 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:35477/hbase/testCompactedBulkLoadedFiles/hfile0 into 29aef6281497cde7be319b98d41a5734/a as hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/4e57be73b315479f8ee001f98dbc7ccb_SeqId_4_ - updating store file list. 
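
The flush above writes a single 51-byte edit (row "testCompactedBulkLoadedFiles", column a:a) out to store file e4e77bebf869499086ce61b296d93aa6 at sequenceid=4, after which the staged HFiles start being committed into the store. As a rough illustration only, the same edit written through the public client API would look like the sketch below; the test writes to the HRegion directly rather than through a Table, and the cell value here is a placeholder.

// Illustrative sketch, not part of the test log: the single cell flushed above
// (row "testCompactedBulkLoadedFiles", column a:a) written via the public client API.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testCompactedBulkLoadedFiles"))) {
      Put put = new Put(Bytes.toBytes("testCompactedBulkLoadedFiles"));
      put.addColumn(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("value")); // placeholder value
      table.put(put);
    }
  }
}
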
2024-12-10T16:34:50,660 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 4e57be73b315479f8ee001f98dbc7ccb_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-10T16:34:50,661 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/4e57be73b315479f8ee001f98dbc7ccb_SeqId_4_ into 29aef6281497cde7be319b98d41a5734/a 2024-12-10T16:34:50,661 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:35477/hbase/testCompactedBulkLoadedFiles/hfile0 into 29aef6281497cde7be319b98d41a5734/a (new location: hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/4e57be73b315479f8ee001f98dbc7ccb_SeqId_4_) 2024-12-10T16:34:50,662 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:35477/hbase/testCompactedBulkLoadedFiles/hfile1 into 29aef6281497cde7be319b98d41a5734/a as hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/1a5de332c518486692c8a97428037eeb_SeqId_4_ - updating store file list. 2024-12-10T16:34:50,666 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 1a5de332c518486692c8a97428037eeb_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-10T16:34:50,666 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/1a5de332c518486692c8a97428037eeb_SeqId_4_ into 29aef6281497cde7be319b98d41a5734/a 2024-12-10T16:34:50,666 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:35477/hbase/testCompactedBulkLoadedFiles/hfile1 into 29aef6281497cde7be319b98d41a5734/a (new location: hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/1a5de332c518486692c8a97428037eeb_SeqId_4_) 2024-12-10T16:34:50,667 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:35477/hbase/testCompactedBulkLoadedFiles/hfile2 into 29aef6281497cde7be319b98d41a5734/a as hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/bb61ad15012c4d13b679f8d561620aef_SeqId_4_ - updating store file list. 
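
Around here the test commits the staged HFiles (hfile0, hfile1, and, in the next entries, hfile2) into store 29aef6281497cde7be319b98d41a5734/a as bulk loads, then selects a major compaction over the four resulting store files. A hedged client-side equivalent is sketched below, assuming the public Admin and BulkLoadHFiles APIs; the test drives HStore directly, and the staging path in the sketch is only a placeholder.

// Illustrative sketch, not part of the test log: bulk loading staged HFiles and then
// requesting a major compaction through the public HBase 2.x APIs. The bulk loader
// expects a layout of <dir>/<family>/<hfile>, so the staging path below is a placeholder.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

public class BulkLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testCompactedBulkLoadedFiles");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Load every HFile found under <staging>/<family>/ into the table's stores.
      BulkLoadHFiles.create(conf).bulkLoad(table, new Path("/staging/testCompactedBulkLoadedFiles"));
      // Counterpart of the major compaction selected in the entries that follow.
      admin.majorCompact(table);
    }
  }
}
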
2024-12-10T16:34:50,671 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for bb61ad15012c4d13b679f8d561620aef_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-10T16:34:50,672 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/bb61ad15012c4d13b679f8d561620aef_SeqId_4_ into 29aef6281497cde7be319b98d41a5734/a 2024-12-10T16:34:50,672 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:35477/hbase/testCompactedBulkLoadedFiles/hfile2 into 29aef6281497cde7be319b98d41a5734/a (new location: hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/bb61ad15012c4d13b679f8d561620aef_SeqId_4_) 2024-12-10T16:34:50,679 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T16:34:50,679 DEBUG [Time-limited test {}] regionserver.HStore(1541): 29aef6281497cde7be319b98d41a5734/a is initiating major compaction (all files) 2024-12-10T16:34:50,679 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 29aef6281497cde7be319b98d41a5734/a in testCompactedBulkLoadedFiles,,1733848490482.29aef6281497cde7be319b98d41a5734. 2024-12-10T16:34:50,679 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/e4e77bebf869499086ce61b296d93aa6, hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/4e57be73b315479f8ee001f98dbc7ccb_SeqId_4_, hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/1a5de332c518486692c8a97428037eeb_SeqId_4_, hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/bb61ad15012c4d13b679f8d561620aef_SeqId_4_] into tmpdir=hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/.tmp, totalSize=19.3 K 2024-12-10T16:34:50,680 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting e4e77bebf869499086ce61b296d93aa6, keycount=1, bloomtype=ROW, size=5.0 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=1733848490572 2024-12-10T16:34:50,680 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 4e57be73b315479f8ee001f98dbc7ccb_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-10T16:34:50,681 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 1a5de332c518486692c8a97428037eeb_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-10T16:34:50,681 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting bb61ad15012c4d13b679f8d561620aef_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-12-10T16:34:50,694 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/.tmp/a/118630957077444988019331911e324d is 55, key is 
testCompactedBulkLoadedFiles/a:a/1733848490572/Put/seqid=0 2024-12-10T16:34:50,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741922_1100 (size=6154) 2024-12-10T16:34:50,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741922_1100 (size=6154) 2024-12-10T16:34:50,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741922_1100 (size=6154) 2024-12-10T16:34:50,708 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/.tmp/a/118630957077444988019331911e324d as hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/118630957077444988019331911e324d 2024-12-10T16:34:50,715 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 4 (all) file(s) in 29aef6281497cde7be319b98d41a5734/a of 29aef6281497cde7be319b98d41a5734 into 118630957077444988019331911e324d(size=6.0 K), total size for store is 6.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T16:34:50,715 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 29aef6281497cde7be319b98d41a5734: 2024-12-10T16:34:50,715 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking 2024-12-10T16:34:50,715 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking 2024-12-10T16:34:50,749 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:35477/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848490542, size=0 (0bytes) 2024-12-10T16:34:50,749 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:35477/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848490542 might be still open, length is 0 2024-12-10T16:34:50,750 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35477/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848490542 2024-12-10T16:34:50,750 WARN [IPC Server handler 4 on default port 35477 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848490542 has not been closed. Lease recovery is in progress. 
RecoveryId = 1101 for block blk_1073741917_1095 2024-12-10T16:34:50,750 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35477/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848490542 after 0ms 2024-12-10T16:34:51,478 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError 2024-12-10T16:34:51,478 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError Metrics about Tables on a single HBase RegionServer 2024-12-10T16:34:51,479 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles 2024-12-10T16:34:51,479 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles Metrics about Tables on a single HBase RegionServer 2024-12-10T16:34:53,317 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:56852 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:43913:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56852 dst: /127.0.0.1:43913 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:43913 remote=/127.0.0.1:56852]. Total timeout mills is 60000, 57398 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T16:34:53,318 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:42122 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:46873:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42122 dst: /127.0.0.1:46873 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T16:34:53,318 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:49354 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:42039:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49354 dst: /127.0.0.1:42039 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
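
The WAL wal.1733848490542 was never cleanly closed (its size is reported as 0 and the lease is still held), so before splitting it the test recovers the HDFS lease; the DataXceiver errors above are the datanodes' side of that still-open write pipeline being interrupted. Below is a minimal sketch of such a recovery loop, assuming the DistributedFileSystem API; RecoverLeaseFSUtils in the surrounding entries performs essentially this, with its own retry schedule (attempt=0 fails, attempt=1 succeeds about four seconds later).

// Illustrative sketch, not part of the test log: recovering the lease on a WAL file that
// was left open, as RecoverLeaseFSUtils does in the surrounding entries.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  static void recover(Configuration conf, Path wal) throws Exception {
    FileSystem fs = wal.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return; // lease recovery only applies to HDFS
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // recoverLease() returns true once the NameNode has closed the file; otherwise retry.
    while (!dfs.recoverLease(wal)) {
      Thread.sleep(4000L); // placeholder backoff; the real utility uses a tuned schedule
    }
  }
}
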
2024-12-10T16:34:53,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741917_1101 (size=1172) 2024-12-10T16:34:53,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741917_1101 (size=1172) 2024-12-10T16:34:54,752 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35477/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848490542 after 4002ms 2024-12-10T16:34:54,759 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848490542: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:54,759 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:35477/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848490542 took 4010ms 2024-12-10T16:34:54,762 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:35477/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848490542; continuing. 2024-12-10T16:34:54,762 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:35477/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848490542 so closing down 2024-12-10T16:34:54,762 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T16:34:54,765 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733848490542.temp 2024-12-10T16:34:54,767 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/recovered.edits/0000000000000000003-wal.1733848490542.temp 2024-12-10T16:34:54,767 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T16:34:54,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741923_1102 (size=548) 2024-12-10T16:34:54,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741923_1102 (size=548) 2024-12-10T16:34:54,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741923_1102 (size=548) 2024-12-10T16:34:54,775 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/recovered.edits/0000000000000000003-wal.1733848490542.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-12-10T16:34:54,777 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/recovered.edits/0000000000000000003-wal.1733848490542.temp to hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/recovered.edits/0000000000000000008 
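
The split leaves a recovered.edits file named 0000000000000000008 under the region directory (the name reflects the highest sequence id written to it), which the region open later in this log discovers and replays. For reference, a small sketch using plain Hadoop FileSystem calls and the paths from this log shows how those files can be listed.

// Illustrative sketch, not part of the test log: listing the recovered.edits files that the
// subsequent region open finds and replays. The paths mirror the layout seen in this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RecoveredEditsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path regionDir = new Path(
        "hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/"
            + "29aef6281497cde7be319b98d41a5734");
    FileSystem fs = regionDir.getFileSystem(conf);
    Path editsDir = new Path(regionDir, "recovered.edits");
    if (fs.exists(editsDir)) {
      for (FileStatus st : fs.listStatus(editsDir)) {
        // e.g. 0000000000000000008 -- the file name encodes the max sequence id it contains
        System.out.println(st.getPath().getName() + " (" + st.getLen() + " bytes)");
      }
    }
  }
}
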
2024-12-10T16:34:54,777 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 5 edits across 1 Regions in 17 ms; skipped=3; WAL=hdfs://localhost:35477/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848490542, size=0, length=0, corrupted=false, cancelled=false 2024-12-10T16:34:54,777 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:35477/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848490542, journal: Splitting hdfs://localhost:35477/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848490542, size=0 (0bytes) at 1733848490749Finishing writing output for hdfs://localhost:35477/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848490542 so closing down at 1733848494762 (+4013 ms)Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/recovered.edits/0000000000000000003-wal.1733848490542.temp at 1733848494767 (+5 ms)3 split writer threads finished at 1733848494767Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/recovered.edits/0000000000000000003-wal.1733848490542.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1733848494775 (+8 ms)Rename recovered edits hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/recovered.edits/0000000000000000003-wal.1733848490542.temp to hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/recovered.edits/0000000000000000008 at 1733848494777 (+2 ms)Processed 5 edits across 1 Regions in 17 ms; skipped=3; WAL=hdfs://localhost:35477/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848490542, size=0, length=0, corrupted=false, cancelled=false at 1733848494777 2024-12-10T16:34:54,779 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:35477/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848490542 to hdfs://localhost:35477/hbase/oldWALs/wal.1733848490542 2024-12-10T16:34:54,780 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/recovered.edits/0000000000000000008 2024-12-10T16:34:54,780 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T16:34:54,782 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:54,794 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848494782, exclude list is [], retry=0 2024-12-10T16:34:54,797 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:54,797 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping 
handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:54,797 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:54,798 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848494782 2024-12-10T16:34:54,799 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:42633:42633)] 2024-12-10T16:34:54,799 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 29aef6281497cde7be319b98d41a5734, NAME => 'testCompactedBulkLoadedFiles,,1733848490482.29aef6281497cde7be319b98d41a5734.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:54,799 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1733848490482.29aef6281497cde7be319b98d41a5734.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:54,799 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:54,799 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:54,800 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:54,801 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 29aef6281497cde7be319b98d41a5734 columnFamilyName a 2024-12-10T16:34:54,801 DEBUG [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:54,808 DEBUG [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/118630957077444988019331911e324d 2024-12-10T16:34:54,812 DEBUG [StoreFileOpener-29aef6281497cde7be319b98d41a5734-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 1a5de332c518486692c8a97428037eeb_SeqId_4_: NONE, but ROW 
specified in column family configuration 2024-12-10T16:34:54,812 DEBUG [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/1a5de332c518486692c8a97428037eeb_SeqId_4_ 2024-12-10T16:34:54,816 DEBUG [StoreFileOpener-29aef6281497cde7be319b98d41a5734-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 4e57be73b315479f8ee001f98dbc7ccb_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-10T16:34:54,816 DEBUG [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/4e57be73b315479f8ee001f98dbc7ccb_SeqId_4_ 2024-12-10T16:34:54,820 DEBUG [StoreFileOpener-29aef6281497cde7be319b98d41a5734-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for bb61ad15012c4d13b679f8d561620aef_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-10T16:34:54,820 DEBUG [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/bb61ad15012c4d13b679f8d561620aef_SeqId_4_ 2024-12-10T16:34:54,824 DEBUG [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/e4e77bebf869499086ce61b296d93aa6 2024-12-10T16:34:54,824 WARN [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/1a5de332c518486692c8a97428037eeb_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@54fda18a 2024-12-10T16:34:54,824 WARN [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/4e57be73b315479f8ee001f98dbc7ccb_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@54fda18a 2024-12-10T16:34:54,824 WARN [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/bb61ad15012c4d13b679f8d561620aef_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@54fda18a 2024-12-10T16:34:54,824 WARN [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/e4e77bebf869499086ce61b296d93aa6 from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@54fda18a 2024-12-10T16:34:54,824 DEBUG [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.StoreEngine(327): Moving the files [hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/1a5de332c518486692c8a97428037eeb_SeqId_4_, hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/4e57be73b315479f8ee001f98dbc7ccb_SeqId_4_, 
hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/bb61ad15012c4d13b679f8d561620aef_SeqId_4_, hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/e4e77bebf869499086ce61b296d93aa6] to archive 2024-12-10T16:34:54,825 DEBUG [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T16:34:54,827 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/1a5de332c518486692c8a97428037eeb_SeqId_4_ to hdfs://localhost:35477/hbase/archive/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/1a5de332c518486692c8a97428037eeb_SeqId_4_ 2024-12-10T16:34:54,827 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/4e57be73b315479f8ee001f98dbc7ccb_SeqId_4_ to hdfs://localhost:35477/hbase/archive/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/4e57be73b315479f8ee001f98dbc7ccb_SeqId_4_ 2024-12-10T16:34:54,827 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/bb61ad15012c4d13b679f8d561620aef_SeqId_4_ to hdfs://localhost:35477/hbase/archive/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/bb61ad15012c4d13b679f8d561620aef_SeqId_4_ 2024-12-10T16:34:54,827 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/e4e77bebf869499086ce61b296d93aa6 to hdfs://localhost:35477/hbase/archive/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/a/e4e77bebf869499086ce61b296d93aa6 2024-12-10T16:34:54,827 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.HStore(327): Store=29aef6281497cde7be319b98d41a5734/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:54,827 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:54,828 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 29aef6281497cde7be319b98d41a5734 columnFamilyName b 
2024-12-10T16:34:54,828 DEBUG [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:54,829 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.HStore(327): Store=29aef6281497cde7be319b98d41a5734/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:54,829 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:54,829 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 29aef6281497cde7be319b98d41a5734 columnFamilyName c 2024-12-10T16:34:54,829 DEBUG [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:54,830 INFO [StoreOpener-29aef6281497cde7be319b98d41a5734-1 {}] regionserver.HStore(327): Store=29aef6281497cde7be319b98d41a5734/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:54,830 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:54,831 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:54,832 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:54,832 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/recovered.edits/0000000000000000008 2024-12-10T16:34:54,834 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/recovered.edits/0000000000000000008: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:54,836 DEBUG [Time-limited test {}] regionserver.HRegion(5836): 29aef6281497cde7be319b98d41a5734 : Replaying compaction marker 
table_name: "testCompactedBulkLoadedFiles" encoded_region_name: "29aef6281497cde7be319b98d41a5734" family_name: "a" compaction_input: "e4e77bebf869499086ce61b296d93aa6" compaction_input: "4e57be73b315479f8ee001f98dbc7ccb_SeqId_4_" compaction_input: "1a5de332c518486692c8a97428037eeb_SeqId_4_" compaction_input: "bb61ad15012c4d13b679f8d561620aef_SeqId_4_" compaction_output: "118630957077444988019331911e324d" store_home_dir: "a" region_name: "testCompactedBulkLoadedFiles,,1733848490482.29aef6281497cde7be319b98d41a5734." with seqId=9223372036854775807 and lastReplayedOpenRegionSeqId=-1 2024-12-10T16:34:54,836 DEBUG [Time-limited test {}] regionserver.HStore(1354): Completing compaction from the WAL marker 2024-12-10T16:34:54,836 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 0, skipped 2, firstSequenceIdInLog=3, maxSequenceIdInLog=8, path=hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/recovered.edits/0000000000000000008 2024-12-10T16:34:54,837 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/recovered.edits/0000000000000000008 2024-12-10T16:34:54,838 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:54,838 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:54,838 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
2024-12-10T16:34:54,840 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 29aef6281497cde7be319b98d41a5734 2024-12-10T16:34:54,842 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/testCompactedBulkLoadedFiles/29aef6281497cde7be319b98d41a5734/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-10T16:34:54,843 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 29aef6281497cde7be319b98d41a5734; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71316562, jitterRate=0.0626995861530304}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T16:34:54,843 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 29aef6281497cde7be319b98d41a5734: Writing region info on filesystem at 1733848494799Initializing all the Stores at 1733848494800 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848494800Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848494800Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848494800Cleaning up temporary data from old regions at 1733848494838 (+38 ms)Region opened successfully at 1733848494843 (+5 ms) 2024-12-10T16:34:54,845 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 29aef6281497cde7be319b98d41a5734, disabling compactions & flushes 2024-12-10T16:34:54,846 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1733848490482.29aef6281497cde7be319b98d41a5734. 2024-12-10T16:34:54,846 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1733848490482.29aef6281497cde7be319b98d41a5734. 2024-12-10T16:34:54,846 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1733848490482.29aef6281497cde7be319b98d41a5734. after waiting 0 ms 2024-12-10T16:34:54,846 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1733848490482.29aef6281497cde7be319b98d41a5734. 2024-12-10T16:34:54,846 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1733848490482.29aef6281497cde7be319b98d41a5734. 
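[editor's note] The CompactionConfiguration(183) lines repeated throughout these entries (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, major period 604800000) correspond to a handful of stock configuration keys. A hedged sketch of setting those same values explicitly; the key names are the standard HBase ones and the values mirror the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);              // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);             // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);       // ratio
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);  // major period, 7 days in ms
    System.out.println(conf.get("hbase.hstore.compaction.min"));
  }
}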
2024-12-10T16:34:54,846 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 29aef6281497cde7be319b98d41a5734: Waiting for close lock at 1733848494845Disabling compacts and flushes for region at 1733848494845Disabling writes for close at 1733848494846 (+1 ms)Writing region close event to WAL at 1733848494846Closed at 1733848494846 2024-12-10T16:34:54,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741924_1103 (size=95) 2024-12-10T16:34:54,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741924_1103 (size=95) 2024-12-10T16:34:54,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741924_1103 (size=95) 2024-12-10T16:34:54,851 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T16:34:54,851 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733848494782) 2024-12-10T16:34:54,863 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testCompactedBulkLoadedFiles Thread=449 (was 431) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_60807040_22 at /127.0.0.1:42196 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_60807040_22 at /127.0.0.1:49438 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_60807040_22 at /127.0.0.1:56932 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:35477 from jenkinstestCompactedBulkLoadedFiles java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: AsyncFSWAL-26-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestCompactedBulkLoadedFiles@localhost:35477 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1348 (was 1265) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=154 (was 158), ProcessCount=11 (was 11), AvailableMemoryMB=4819 (was 4836) 2024-12-10T16:34:54,864 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1348 is superior to 1024 2024-12-10T16:34:54,876 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenViaHRegion Thread=449, OpenFileDescriptor=1348, MaxFileDescriptor=1048576, SystemLoadAverage=154, ProcessCount=11, AvailableMemoryMB=4819 2024-12-10T16:34:54,876 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1348 is superior to 1024 2024-12-10T16:34:54,890 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:54,892 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T16:34:54,892 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T16:34:54,895 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-48968384, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/hregion-48968384, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:54,907 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-48968384/hregion-48968384.1733848494895, exclude list is [], retry=0 2024-12-10T16:34:54,910 DEBUG [AsyncFSWAL-28-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:54,911 DEBUG [AsyncFSWAL-28-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:54,911 DEBUG [AsyncFSWAL-28-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:54,912 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-48968384/hregion-48968384.1733848494895 2024-12-10T16:34:54,913 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:36795:36795)] 2024-12-10T16:34:54,913 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => acfd00aa37f6ae85778dac72ffddca3f, NAME => 'testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35477/hbase 2024-12-10T16:34:54,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741926_1105 (size=67) 2024-12-10T16:34:54,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741926_1105 (size=67) 2024-12-10T16:34:54,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741926_1105 (size=67) 2024-12-10T16:34:54,922 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:54,924 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:54,925 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region acfd00aa37f6ae85778dac72ffddca3f columnFamilyName a 2024-12-10T16:34:54,925 DEBUG [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:54,925 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(327): Store=acfd00aa37f6ae85778dac72ffddca3f/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:54,925 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:54,927 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region acfd00aa37f6ae85778dac72ffddca3f columnFamilyName b 2024-12-10T16:34:54,927 DEBUG [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:54,927 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(327): Store=acfd00aa37f6ae85778dac72ffddca3f/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:54,927 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:54,928 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region acfd00aa37f6ae85778dac72ffddca3f columnFamilyName c 2024-12-10T16:34:54,929 DEBUG [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:54,929 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(327): Store=acfd00aa37f6ae85778dac72ffddca3f/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:54,929 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:54,930 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:54,930 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:54,931 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:54,931 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:54,931 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T16:34:54,932 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:54,934 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T16:34:54,935 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened acfd00aa37f6ae85778dac72ffddca3f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68019754, jitterRate=0.013573318719863892}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T16:34:54,935 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for acfd00aa37f6ae85778dac72ffddca3f: Writing region info on filesystem at 1733848494922Initializing all the Stores at 1733848494923 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848494923Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848494923Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848494923Cleaning up temporary data from old regions at 1733848494931 (+8 ms)Region opened successfully at 1733848494935 (+4 ms) 2024-12-10T16:34:54,935 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing acfd00aa37f6ae85778dac72ffddca3f, disabling compactions & flushes 2024-12-10T16:34:54,935 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f. 2024-12-10T16:34:54,935 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f. 2024-12-10T16:34:54,935 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f. 
after waiting 0 ms 2024-12-10T16:34:54,935 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f. 2024-12-10T16:34:54,936 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f. 2024-12-10T16:34:54,936 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for acfd00aa37f6ae85778dac72ffddca3f: Waiting for close lock at 1733848494935Disabling compacts and flushes for region at 1733848494935Disabling writes for close at 1733848494935Writing region close event to WAL at 1733848494936 (+1 ms)Closed at 1733848494936 2024-12-10T16:34:54,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741925_1104 (size=95) 2024-12-10T16:34:54,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741925_1104 (size=95) 2024-12-10T16:34:54,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741925_1104 (size=95) 2024-12-10T16:34:54,940 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T16:34:54,940 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-48968384:(num 1733848494895) 2024-12-10T16:34:54,940 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T16:34:54,942 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:54,953 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848494942, exclude list is [], retry=0 2024-12-10T16:34:54,955 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:54,955 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:54,956 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:54,957 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848494942 2024-12-10T16:34:54,957 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:36795:36795)] 2024-12-10T16:34:54,957 DEBUG [Time-limited test {}] 
regionserver.HRegion(7752): Opening region: {ENCODED => acfd00aa37f6ae85778dac72ffddca3f, NAME => 'testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:54,957 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:54,958 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:54,958 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:54,959 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:54,960 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region acfd00aa37f6ae85778dac72ffddca3f columnFamilyName a 2024-12-10T16:34:54,960 DEBUG [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:54,960 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(327): Store=acfd00aa37f6ae85778dac72ffddca3f/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:54,960 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:54,961 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region acfd00aa37f6ae85778dac72ffddca3f columnFamilyName b 2024-12-10T16:34:54,961 DEBUG [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:54,962 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(327): Store=acfd00aa37f6ae85778dac72ffddca3f/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:54,962 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:54,962 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region acfd00aa37f6ae85778dac72ffddca3f columnFamilyName c 2024-12-10T16:34:54,962 DEBUG [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:54,963 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(327): Store=acfd00aa37f6ae85778dac72ffddca3f/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:54,963 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:54,963 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:54,964 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:54,965 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:54,965 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:54,966 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
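[editor's note] The FlushLargeStoresPolicy line just above derives its 42.7 M lower bound from the region memstore flush size divided by the number of column families. A quick check of that arithmetic; the 128 MB figure is assumed to be the default region flush size, and the three families a/b/c are taken from the log:

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    long memStoreFlushSize = 128L * 1024 * 1024; // 134217728, assumed default flush size
    int families = 3;                            // families a, b and c
    long lowerBound = memStoreFlushSize / families;
    // Prints 44739242 (~42.7 MB), matching flushSizeLowerBound=44739242 in the
    // "Opened ..." entries of this log.
    System.out.println(lowerBound);
  }
}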
2024-12-10T16:34:54,967 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:54,968 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened acfd00aa37f6ae85778dac72ffddca3f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73259579, jitterRate=0.09165279567241669}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T16:34:54,968 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for acfd00aa37f6ae85778dac72ffddca3f: Writing region info on filesystem at 1733848494958Initializing all the Stores at 1733848494958Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848494959 (+1 ms)Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848494959Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848494959Cleaning up temporary data from old regions at 1733848494965 (+6 ms)Region opened successfully at 1733848494968 (+3 ms) 2024-12-10T16:34:54,975 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing acfd00aa37f6ae85778dac72ffddca3f 3/3 column families, dataSize=870 B heapSize=2.31 KB 2024-12-10T16:34:54,989 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/.tmp/a/277f7b282aa94effb0c75407c2667211 is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1733848494968/Put/seqid=0 2024-12-10T16:34:54,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741928_1107 (size=5958) 2024-12-10T16:34:54,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741928_1107 (size=5958) 2024-12-10T16:34:54,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741928_1107 (size=5958) 2024-12-10T16:34:54,996 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/.tmp/a/277f7b282aa94effb0c75407c2667211 2024-12-10T16:34:55,001 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/.tmp/a/277f7b282aa94effb0c75407c2667211 as hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/a/277f7b282aa94effb0c75407c2667211 2024-12-10T16:34:55,007 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/a/277f7b282aa94effb0c75407c2667211, entries=10, sequenceid=13, filesize=5.8 K 2024-12-10T16:34:55,008 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for acfd00aa37f6ae85778dac72ffddca3f in 33ms, sequenceid=13, compaction requested=false 2024-12-10T16:34:55,008 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for acfd00aa37f6ae85778dac72ffddca3f: 2024-12-10T16:34:55,026 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing acfd00aa37f6ae85778dac72ffddca3f, disabling compactions & flushes 2024-12-10T16:34:55,026 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f. 2024-12-10T16:34:55,026 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f. 2024-12-10T16:34:55,026 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f. after waiting 0 ms 2024-12-10T16:34:55,026 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f. 2024-12-10T16:34:55,027 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1740 in region testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f. 2024-12-10T16:34:55,027 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f. 
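[editor's note] The flush above wrote ten cells of family a (entries=10, sequenceid=13; the example key shows qualifier x0) into 277f7b282aa94effb0c75407c2667211 before the region is closed with further edits still in the memstore. The test drives HRegion directly, but a rough client-side equivalent of those writes, assuming a running cluster and the standard HBase 2.x client, might look like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutEditsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testReplayEditsWrittenViaHRegion"))) {
      byte[] row = Bytes.toBytes("testReplayEditsWrittenViaHRegion");
      for (int i = 0; i < 10; i++) {
        // Qualifiers x0..x9 follow the example key shown in the flush entry;
        // the values here are purely illustrative.
        Put put = new Put(row);
        put.addColumn(Bytes.toBytes("a"), Bytes.toBytes("x" + i), Bytes.toBytes("v" + i));
        table.put(put);
      }
    }
  }
}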
2024-12-10T16:34:55,027 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for acfd00aa37f6ae85778dac72ffddca3f: Waiting for close lock at 1733848495026Disabling compacts and flushes for region at 1733848495026Disabling writes for close at 1733848495026Writing region close event to WAL at 1733848495027 (+1 ms)Closed at 1733848495027 2024-12-10T16:34:55,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741927_1106 (size=3346) 2024-12-10T16:34:55,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741927_1106 (size=3346) 2024-12-10T16:34:55,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741927_1106 (size=3346) 2024-12-10T16:34:55,043 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848494942, size=3.3 K (3346bytes) 2024-12-10T16:34:55,043 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848494942 2024-12-10T16:34:55,043 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848494942 after 0ms 2024-12-10T16:34:55,045 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848494942: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:55,046 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848494942 took 3ms 2024-12-10T16:34:55,048 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848494942 so closing down 2024-12-10T16:34:55,048 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T16:34:55,048 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1733848494942.temp 2024-12-10T16:34:55,050 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000003-wal.1733848494942.temp 2024-12-10T16:34:55,050 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T16:34:55,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741929_1108 (size=2944) 2024-12-10T16:34:55,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741929_1108 (size=2944) 2024-12-10T16:34:55,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741929_1108 
(size=2944) 2024-12-10T16:34:55,056 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000003-wal.1733848494942.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-12-10T16:34:55,057 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000003-wal.1733848494942.temp to hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000035 2024-12-10T16:34:55,057 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 11 ms; skipped=2; WAL=hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848494942, size=3.3 K, length=3346, corrupted=false, cancelled=false 2024-12-10T16:34:55,057 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848494942, journal: Splitting hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848494942, size=3.3 K (3346bytes) at 1733848495043Finishing writing output for hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848494942 so closing down at 1733848495048 (+5 ms)Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000003-wal.1733848494942.temp at 1733848495050 (+2 ms)3 split writer threads finished at 1733848495050Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000003-wal.1733848494942.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1733848495056 (+6 ms)Rename recovered edits hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000003-wal.1733848494942.temp to hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000035 at 1733848495057 (+1 ms)Processed 32 edits across 1 Regions in 11 ms; skipped=2; WAL=hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848494942, size=3.3 K, length=3346, corrupted=false, cancelled=false at 1733848495057 2024-12-10T16:34:55,059 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848494942 to hdfs://localhost:35477/hbase/oldWALs/wal.1733848494942 2024-12-10T16:34:55,060 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000035 2024-12-10T16:34:55,060 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T16:34:55,061 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, 
rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:55,076 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848495062, exclude list is [], retry=0 2024-12-10T16:34:55,078 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:55,078 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:55,079 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:55,080 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848495062 2024-12-10T16:34:55,080 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44417:44417),(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:36795:36795)] 2024-12-10T16:34:55,080 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => acfd00aa37f6ae85778dac72ffddca3f, NAME => 'testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f.', STARTKEY => '', ENDKEY => ''} 2024-12-10T16:34:55,080 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:55,080 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:55,081 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:55,082 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:55,083 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for 
minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region acfd00aa37f6ae85778dac72ffddca3f columnFamilyName a 2024-12-10T16:34:55,083 DEBUG [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:55,088 DEBUG [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/a/277f7b282aa94effb0c75407c2667211 2024-12-10T16:34:55,088 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(327): Store=acfd00aa37f6ae85778dac72ffddca3f/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:55,089 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:55,089 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region acfd00aa37f6ae85778dac72ffddca3f columnFamilyName b 2024-12-10T16:34:55,090 DEBUG [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:55,090 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(327): Store=acfd00aa37f6ae85778dac72ffddca3f/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:55,090 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:55,091 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region acfd00aa37f6ae85778dac72ffddca3f columnFamilyName c 2024-12-10T16:34:55,091 DEBUG [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:55,091 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(327): Store=acfd00aa37f6ae85778dac72ffddca3f/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:55,091 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:55,092 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:55,093 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:55,094 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000035 2024-12-10T16:34:55,095 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000035: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:55,096 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 10, firstSequenceIdInLog=3, maxSequenceIdInLog=35, path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000035 2024-12-10T16:34:55,096 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing acfd00aa37f6ae85778dac72ffddca3f 3/3 column families, dataSize=1.70 KB heapSize=3.88 KB 2024-12-10T16:34:55,110 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/.tmp/b/5a04f7c5130d4894bd6e995efbb50c57 is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1733848495008/Put/seqid=0 2024-12-10T16:34:55,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741931_1110 (size=5958) 2024-12-10T16:34:55,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741931_1110 (size=5958) 2024-12-10T16:34:55,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741931_1110 (size=5958) 2024-12-10T16:34:55,116 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), 
to=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/.tmp/b/5a04f7c5130d4894bd6e995efbb50c57 2024-12-10T16:34:55,139 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T16:34:55,140 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/.tmp/c/7fe029d9542b47868fe0363b59c7dad0 is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1733848495016/Put/seqid=0 2024-12-10T16:34:55,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741932_1111 (size=5958) 2024-12-10T16:34:55,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741932_1111 (size=5958) 2024-12-10T16:34:55,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741932_1111 (size=5958) 2024-12-10T16:34:55,148 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/.tmp/c/7fe029d9542b47868fe0363b59c7dad0 2024-12-10T16:34:55,154 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/.tmp/b/5a04f7c5130d4894bd6e995efbb50c57 as hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/b/5a04f7c5130d4894bd6e995efbb50c57 2024-12-10T16:34:55,159 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/b/5a04f7c5130d4894bd6e995efbb50c57, entries=10, sequenceid=35, filesize=5.8 K 2024-12-10T16:34:55,160 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/.tmp/c/7fe029d9542b47868fe0363b59c7dad0 as hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/c/7fe029d9542b47868fe0363b59c7dad0 2024-12-10T16:34:55,165 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/c/7fe029d9542b47868fe0363b59c7dad0, entries=10, sequenceid=35, filesize=5.8 K 2024-12-10T16:34:55,165 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for acfd00aa37f6ae85778dac72ffddca3f in 69ms, sequenceid=35, compaction requested=false; wal=null 2024-12-10T16:34:55,166 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000035 2024-12-10T16:34:55,167 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:55,167 DEBUG 
[Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:55,168 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T16:34:55,169 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:55,171 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/35.seqid, newMaxSeqId=35, maxSeqId=1 2024-12-10T16:34:55,172 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened acfd00aa37f6ae85778dac72ffddca3f; next sequenceid=36; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66126047, jitterRate=-0.014645114541053772}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T16:34:55,172 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for acfd00aa37f6ae85778dac72ffddca3f: Writing region info on filesystem at 1733848495081Initializing all the Stores at 1733848495082 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848495082Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848495082Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848495082Obtaining lock to block concurrent updates at 1733848495096 (+14 ms)Preparing flush snapshotting stores in acfd00aa37f6ae85778dac72ffddca3f at 1733848495096Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f., syncing WAL and waiting on mvcc, flushsize=dataSize=1740, getHeapSize=3920, getOffHeapSize=0, getCellsCount=20 at 1733848495097 (+1 ms)Flushing stores of testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f. 
at 1733848495097Flushing acfd00aa37f6ae85778dac72ffddca3f/b: creating writer at 1733848495097Flushing acfd00aa37f6ae85778dac72ffddca3f/b: appending metadata at 1733848495109 (+12 ms)Flushing acfd00aa37f6ae85778dac72ffddca3f/b: closing flushed file at 1733848495109Flushing acfd00aa37f6ae85778dac72ffddca3f/c: creating writer at 1733848495122 (+13 ms)Flushing acfd00aa37f6ae85778dac72ffddca3f/c: appending metadata at 1733848495139 (+17 ms)Flushing acfd00aa37f6ae85778dac72ffddca3f/c: closing flushed file at 1733848495139Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e949d9a: reopening flushed file at 1733848495153 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@170a2884: reopening flushed file at 1733848495159 (+6 ms)Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for acfd00aa37f6ae85778dac72ffddca3f in 69ms, sequenceid=35, compaction requested=false; wal=null at 1733848495165 (+6 ms)Cleaning up temporary data from old regions at 1733848495167 (+2 ms)Region opened successfully at 1733848495172 (+5 ms) 2024-12-10T16:34:55,239 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848495062, size=0 (0bytes) 2024-12-10T16:34:55,239 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848495062 might be still open, length is 0 2024-12-10T16:34:55,239 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848495062 2024-12-10T16:34:55,240 WARN [IPC Server handler 0 on default port 35477 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848495062 has not been closed. Lease recovery is in progress. RecoveryId = 1112 for block blk_1073741930_1109 2024-12-10T16:34:55,240 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848495062 after 1ms 2024-12-10T16:34:56,320 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:49504 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:42039:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49504 dst: /127.0.0.1:42039 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:42039 remote=/127.0.0.1:49504]. Total timeout mills is 60000, 58878 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T16:34:56,321 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:42262 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:46873:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42262 dst: /127.0.0.1:46873 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T16:34:56,321 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_545150750_22 at /127.0.0.1:56978 [Receiving block BP-1758511473-172.17.0.3-1733848457790:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:43913:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56978 dst: /127.0.0.1:43913 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T16:34:56,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741930_1112 (size=2936) 2024-12-10T16:34:56,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741930_1112 (size=2936) 2024-12-10T16:34:56,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741930_1112 (size=2936) 2024-12-10T16:34:59,242 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848495062 after 4002ms 2024-12-10T16:34:59,246 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848495062: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:59,246 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848495062 took 4007ms 2024-12-10T16:34:59,250 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848495062; continuing. 
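The entries above show the second WAL (wal.1733848495062) being split while its length is still reported as 0: the first lease-recovery attempt fails, the three datanodes abort their in-flight WRITE_BLOCK operations, and the lease is finally recovered on attempt=1 roughly four seconds later, after which the reader opens the file and tolerates the EOF. A minimal sketch of that recover-then-retry loop, assuming an HDFS client pointed at the hdfs://localhost:35477 namenode used in this test (the class, its sleep interval, and the retry policy are illustrative, not the actual RecoverLeaseFSUtils code):

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecoverySketch {
  // Ask the NameNode to recover the lease on a WAL that may still be open for
  // write. recoverLease() returns true only once the file is closed and its
  // length is trustworthy, so keep retrying with a pause in between.
  public static void recoverLease(Configuration conf, String walPath) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:35477"), conf);
    Path wal = new Path(walPath);
    int attempt = 0;
    while (!dfs.recoverLease(wal)) {
      attempt++;
      Thread.sleep(1000L); // the run above needed one retry, ~4s end to end
    }
    System.out.println("Recovered lease, attempt=" + attempt + " on file=" + walPath);
  }
}
```

Only after the lease is recovered does the splitter trust the reported file length, which is why the reader below can hit EOF on a "size=0" WAL and simply continue.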
2024-12-10T16:34:59,250 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848495062 so closing down 2024-12-10T16:34:59,250 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-12-10T16:34:59,252 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000037-wal.1733848495062.temp 2024-12-10T16:34:59,255 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000037-wal.1733848495062.temp 2024-12-10T16:34:59,256 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-12-10T16:34:59,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741933_1113 (size=2944) 2024-12-10T16:34:59,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741933_1113 (size=2944) 2024-12-10T16:34:59,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741933_1113 (size=2944) 2024-12-10T16:34:59,265 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000037-wal.1733848495062.temp (wrote 30 edits, skipped 0 edits in 1 ms) 2024-12-10T16:34:59,266 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000037-wal.1733848495062.temp to hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000066 2024-12-10T16:34:59,266 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 30 edits across 1 Regions in 19 ms; skipped=0; WAL=hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848495062, size=0, length=0, corrupted=false, cancelled=false 2024-12-10T16:34:59,266 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848495062, journal: Splitting hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848495062, size=0 (0bytes) at 1733848495239Finishing writing output for hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848495062 so closing down at 1733848499250 (+4011 ms)Creating recovered edits writer path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000037-wal.1733848495062.temp at 1733848499255 (+5 ms)3 split writer threads finished at 1733848499256 (+1 ms)Closed recovered edits writer 
path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000037-wal.1733848495062.temp (wrote 30 edits, skipped 0 edits in 1 ms) at 1733848499265 (+9 ms)Rename recovered edits hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000037-wal.1733848495062.temp to hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000066 at 1733848499266 (+1 ms)Processed 30 edits across 1 Regions in 19 ms; skipped=0; WAL=hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848495062, size=0, length=0, corrupted=false, cancelled=false at 1733848499266 2024-12-10T16:34:59,267 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848495062 to hdfs://localhost:35477/hbase/oldWALs/wal.1733848495062 2024-12-10T16:34:59,268 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000066 2024-12-10T16:34:59,268 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-12-10T16:34:59,270 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:35477/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889, archiveDir=hdfs://localhost:35477/hbase/oldWALs, maxLogs=32 2024-12-10T16:34:59,281 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848499270, exclude list is [], retry=0 2024-12-10T16:34:59,284 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43913,DS-3efadbaf-2f12-4fe8-9fd8-91ba1926bf09,DISK] 2024-12-10T16:34:59,284 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46873,DS-ddfb4a98-1093-42ea-847d-0d86a4cd1023,DISK] 2024-12-10T16:34:59,284 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42039,DS-c05f9a42-583c-4f58-bba4-77e13705c0bc,DISK] 2024-12-10T16:34:59,285 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848499270 2024-12-10T16:34:59,285 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36795:36795),(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:44417:44417)] 2024-12-10T16:34:59,286 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; 
preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T16:34:59,287 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:59,288 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region acfd00aa37f6ae85778dac72ffddca3f columnFamilyName a 2024-12-10T16:34:59,288 DEBUG [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:59,292 DEBUG [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/a/277f7b282aa94effb0c75407c2667211 2024-12-10T16:34:59,292 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(327): Store=acfd00aa37f6ae85778dac72ffddca3f/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:59,292 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:59,293 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region acfd00aa37f6ae85778dac72ffddca3f columnFamilyName b 2024-12-10T16:34:59,293 DEBUG [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:59,298 DEBUG [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/b/5a04f7c5130d4894bd6e995efbb50c57 2024-12-10T16:34:59,298 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(327): Store=acfd00aa37f6ae85778dac72ffddca3f/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:59,298 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:59,299 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region acfd00aa37f6ae85778dac72ffddca3f columnFamilyName c 2024-12-10T16:34:59,299 DEBUG [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T16:34:59,304 DEBUG [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/c/7fe029d9542b47868fe0363b59c7dad0 2024-12-10T16:34:59,305 INFO [StoreOpener-acfd00aa37f6ae85778dac72ffddca3f-1 {}] regionserver.HStore(327): Store=acfd00aa37f6ae85778dac72ffddca3f/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T16:34:59,305 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:59,306 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:59,307 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:59,308 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000066 2024-12-10T16:34:59,310 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000066: isRecoveredEdits=true, hasTagCompression=true, 
hasValueCompression=true, valueCompressionType=GZ 2024-12-10T16:34:59,313 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 30, skipped 0, firstSequenceIdInLog=37, maxSequenceIdInLog=66, path=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000066 2024-12-10T16:34:59,313 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing acfd00aa37f6ae85778dac72ffddca3f 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-12-10T16:34:59,327 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/.tmp/a/53e1af77bc4e43c6b26adf34b19afd16 is 91, key is testReplayEditsWrittenViaHRegion/a:y0/1733848495181/Put/seqid=0 2024-12-10T16:34:59,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741935_1115 (size=5958) 2024-12-10T16:34:59,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741935_1115 (size=5958) 2024-12-10T16:34:59,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741935_1115 (size=5958) 2024-12-10T16:34:59,333 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/.tmp/a/53e1af77bc4e43c6b26adf34b19afd16 2024-12-10T16:34:59,351 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/.tmp/b/7337a3696ad44945ae262c9817ad0642 is 91, key is testReplayEditsWrittenViaHRegion/b:y0/1733848495186/Put/seqid=0 2024-12-10T16:34:59,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741936_1116 (size=5958) 2024-12-10T16:34:59,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741936_1116 (size=5958) 2024-12-10T16:34:59,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741936_1116 (size=5958) 2024-12-10T16:34:59,357 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/.tmp/b/7337a3696ad44945ae262c9817ad0642 2024-12-10T16:34:59,374 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/.tmp/c/d226253d0e9e42e9903c3f457a08ca07 is 91, key is testReplayEditsWrittenViaHRegion/c:y0/1733848495192/Put/seqid=0 2024-12-10T16:34:59,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741937_1117 (size=5958) 2024-12-10T16:34:59,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741937_1117 
(size=5958) 2024-12-10T16:34:59,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741937_1117 (size=5958) 2024-12-10T16:34:59,381 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/.tmp/c/d226253d0e9e42e9903c3f457a08ca07 2024-12-10T16:34:59,386 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/.tmp/a/53e1af77bc4e43c6b26adf34b19afd16 as hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/a/53e1af77bc4e43c6b26adf34b19afd16 2024-12-10T16:34:59,391 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/a/53e1af77bc4e43c6b26adf34b19afd16, entries=10, sequenceid=66, filesize=5.8 K 2024-12-10T16:34:59,392 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/.tmp/b/7337a3696ad44945ae262c9817ad0642 as hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/b/7337a3696ad44945ae262c9817ad0642 2024-12-10T16:34:59,397 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/b/7337a3696ad44945ae262c9817ad0642, entries=10, sequenceid=66, filesize=5.8 K 2024-12-10T16:34:59,398 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/.tmp/c/d226253d0e9e42e9903c3f457a08ca07 as hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/c/d226253d0e9e42e9903c3f457a08ca07 2024-12-10T16:34:59,402 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/c/d226253d0e9e42e9903c3f457a08ca07, entries=10, sequenceid=66, filesize=5.8 K 2024-12-10T16:34:59,403 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for acfd00aa37f6ae85778dac72ffddca3f in 90ms, sequenceid=66, compaction requested=false; wal=null 2024-12-10T16:34:59,403 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/0000000000000000066 2024-12-10T16:34:59,405 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:59,405 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:59,405 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion 
descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-10T16:34:59,407 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for acfd00aa37f6ae85778dac72ffddca3f 2024-12-10T16:34:59,409 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/hbase/data/default/testReplayEditsWrittenViaHRegion/acfd00aa37f6ae85778dac72ffddca3f/recovered.edits/66.seqid, newMaxSeqId=66, maxSeqId=35 2024-12-10T16:34:59,410 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened acfd00aa37f6ae85778dac72ffddca3f; next sequenceid=67; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72008395, jitterRate=0.0730087012052536}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-10T16:34:59,410 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for acfd00aa37f6ae85778dac72ffddca3f: Writing region info on filesystem at 1733848499286Initializing all the Stores at 1733848499287 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848499287Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848499287Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733848499287Obtaining lock to block concurrent updates at 1733848499313 (+26 ms)Preparing flush snapshotting stores in acfd00aa37f6ae85778dac72ffddca3f at 1733848499313Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f., syncing WAL and waiting on mvcc, flushsize=dataSize=2610, getHeapSize=5520, getOffHeapSize=0, getCellsCount=30 at 1733848499313Flushing stores of testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f. 
at 1733848499313Flushing acfd00aa37f6ae85778dac72ffddca3f/a: creating writer at 1733848499313Flushing acfd00aa37f6ae85778dac72ffddca3f/a: appending metadata at 1733848499326 (+13 ms)Flushing acfd00aa37f6ae85778dac72ffddca3f/a: closing flushed file at 1733848499326Flushing acfd00aa37f6ae85778dac72ffddca3f/b: creating writer at 1733848499338 (+12 ms)Flushing acfd00aa37f6ae85778dac72ffddca3f/b: appending metadata at 1733848499351 (+13 ms)Flushing acfd00aa37f6ae85778dac72ffddca3f/b: closing flushed file at 1733848499351Flushing acfd00aa37f6ae85778dac72ffddca3f/c: creating writer at 1733848499362 (+11 ms)Flushing acfd00aa37f6ae85778dac72ffddca3f/c: appending metadata at 1733848499374 (+12 ms)Flushing acfd00aa37f6ae85778dac72ffddca3f/c: closing flushed file at 1733848499374Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a14f47: reopening flushed file at 1733848499385 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@50c64663: reopening flushed file at 1733848499391 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5adca800: reopening flushed file at 1733848499397 (+6 ms)Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for acfd00aa37f6ae85778dac72ffddca3f in 90ms, sequenceid=66, compaction requested=false; wal=null at 1733848499403 (+6 ms)Cleaning up temporary data from old regions at 1733848499405 (+2 ms)Region opened successfully at 1733848499410 (+5 ms) 2024-12-10T16:34:59,424 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing acfd00aa37f6ae85778dac72ffddca3f, disabling compactions & flushes 2024-12-10T16:34:59,424 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f. 2024-12-10T16:34:59,424 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f. 2024-12-10T16:34:59,424 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f. after waiting 0 ms 2024-12-10T16:34:59,424 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f. 2024-12-10T16:34:59,426 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1733848494890.acfd00aa37f6ae85778dac72ffddca3f. 
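Taken together, this second replay applies all 30 edits in recovered.edits/0000000000000000066 (none skipped, since every column family had last been flushed below firstSequenceIdInLog=37), flushes the three memstores to new HFiles, deletes the recovered-edits file, and records newMaxSeqId=66 before the region reopens at sequenceid=67 and is then closed. Two details worth pulling out of the log: the final recovered-edits file is named with its highest sequence id zero-padded to 19 digits, and an edit is only re-applied when it is newer than what its column family has already persisted, which is consistent with the earlier pass reporting "Applied 20, skipped 10" after family a had already been flushed. A small, purely illustrative sketch of that bookkeeping (class and method names are invented for this example, not HBase API):

```java
import java.util.List;

// Illustrative only: mirrors the recovered.edits naming and the applied/skipped
// counting visible in the replay log above; not the actual HBase implementation.
class RecoveredEditsSketch {

  // The split output is renamed to the highest sequence id it contains,
  // zero-padded to 19 digits, e.g. 0000000000000000066.
  static String recoveredEditsFileName(long maxSeqIdInFile) {
    return String.format("%019d", maxSeqIdInFile);
  }

  // An edit is re-applied only if it is newer than everything the target
  // column family has already persisted; older edits count as skipped.
  static long[] countAppliedAndSkipped(List<Long> editSeqIds, long maxFlushedSeqIdForFamily) {
    long applied = 0, skipped = 0;
    for (long seqId : editSeqIds) {
      if (seqId > maxFlushedSeqIdForFamily) {
        applied++;
      } else {
        skipped++;
      }
    }
    return new long[] { applied, skipped };
  }
}
```

For instance, recoveredEditsFileName(66) yields "0000000000000000066", the file this region just replayed and deleted.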
2024-12-10T16:34:59,426 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for acfd00aa37f6ae85778dac72ffddca3f: Waiting for close lock at 1733848499424Disabling compacts and flushes for region at 1733848499424Disabling writes for close at 1733848499424Writing region close event to WAL at 1733848499426 (+2 ms)Closed at 1733848499426 2024-12-10T16:34:59,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741934_1114 (size=95) 2024-12-10T16:34:59,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741934_1114 (size=95) 2024-12-10T16:34:59,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741934_1114 (size=95) 2024-12-10T16:34:59,430 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-12-10T16:34:59,430 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1733848499270) 2024-12-10T16:34:59,444 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenViaHRegion Thread=458 (was 449) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-669249948_22 at /127.0.0.1:57024 [Waiting for operation #10] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46845 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:46845 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: AsyncFSWAL-28-2 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-669249948_22 at /127.0.0.1:49550 [Waiting for operation #18] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:35477 from jenkinstestReplayEditsWrittenViaHRegion 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkinstestReplayEditsWrittenViaHRegion@localhost:35477 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-669249948_22 at /127.0.0.1:42280 [Waiting for operation #15] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1416 (was 1348) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=149 (was 154), ProcessCount=11 (was 11), AvailableMemoryMB=4780 (was 4819) 2024-12-10T16:34:59,444 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1416 is superior to 1024 2024-12-10T16:34:59,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-10T16:34:59,444 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-10T16:34:59,444 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T16:34:59,444 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T16:34:59,445 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T16:34:59,445 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
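The "Call stack" entries above record the class-level teardown path visible in the frames: AbstractTestWALReplay.tearDownAfterClass delegates to HBaseTestingUtil.shutdownMiniCluster, which first closes the shared async connection and then stops the mini HBase cluster. A minimal sketch of that pattern follows; the class and field names are hypothetical, and only HBaseTestingUtil, its startMiniCluster/shutdownMiniCluster calls, and the JUnit annotations are taken from (or assumed consistent with) the frames logged above.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

// Illustrative only: ExampleMiniClusterTest and TEST_UTIL are invented names; the
// start/shutdown calls mirror the tearDownAfterClass frames in the stack above.
public class ExampleMiniClusterTest {

  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Brings up mini DFS, ZooKeeper and HBase for the whole test class.
    TEST_UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // Mirrors the logged teardown: close the shared async connection, stop the
    // mini HBase cluster, then shut down the mini DFS cluster.
    TEST_UTIL.shutdownMiniCluster();
  }
}
```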
2024-12-10T16:34:59,445 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-10T16:34:59,445 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1849544579, stopped=false 2024-12-10T16:34:59,446 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=4b7737f37de9,42829,1733848461149 2024-12-10T16:34:59,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40043-0x10010af1c860002, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T16:34:59,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T16:34:59,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T16:34:59,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40043-0x10010af1c860002, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:59,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:59,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:59,478 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-10T16:34:59,479 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-10T16:34:59,479 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T16:34:59,479 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T16:34:59,479 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T16:34:59,480 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40043-0x10010af1c860002, quorum=127.0.0.1:53765, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T16:34:59,480 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T16:34:59,480 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '4b7737f37de9,40043,1733848461924' ***** 2024-12-10T16:34:59,480 INFO [Time-limited test {}] 
regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T16:34:59,480 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '4b7737f37de9,44673,1733848461969' ***** 2024-12-10T16:34:59,480 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T16:34:59,480 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T16:34:59,480 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T16:34:59,480 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T16:34:59,480 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T16:34:59,480 INFO [RS:1;4b7737f37de9:40043 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T16:34:59,480 INFO [RS:2;4b7737f37de9:44673 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T16:34:59,481 INFO [RS:1;4b7737f37de9:40043 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T16:34:59,481 INFO [RS:2;4b7737f37de9:44673 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T16:34:59,481 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.HRegionServer(3091): Received CLOSE for 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:59,481 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.HRegionServer(959): stopping server 4b7737f37de9,40043,1733848461924 2024-12-10T16:34:59,481 INFO [RS:1;4b7737f37de9:40043 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T16:34:59,481 INFO [RS:1;4b7737f37de9:40043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;4b7737f37de9:40043. 
2024-12-10T16:34:59,481 DEBUG [RS:1;4b7737f37de9:40043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T16:34:59,481 DEBUG [RS:1;4b7737f37de9:40043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T16:34:59,481 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.HRegionServer(959): stopping server 4b7737f37de9,44673,1733848461969 2024-12-10T16:34:59,481 INFO [RS:2;4b7737f37de9:44673 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T16:34:59,481 INFO [RS:2;4b7737f37de9:44673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;4b7737f37de9:44673. 2024-12-10T16:34:59,481 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T16:34:59,481 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-12-10T16:34:59,481 DEBUG [RS:2;4b7737f37de9:44673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T16:34:59,481 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T16:34:59,481 DEBUG [RS:2;4b7737f37de9:44673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T16:34:59,481 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3c4a0fab46c2c0263b38e45209989070, disabling compactions & flushes 2024-12-10T16:34:59,481 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-10T16:34:59,481 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-10T16:34:59,481 INFO [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:59,481 DEBUG [RS:2;4b7737f37de9:44673 {}] regionserver.HRegionServer(1325): Online Regions={3c4a0fab46c2c0263b38e45209989070=testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070.} 2024-12-10T16:34:59,481 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:59,481 DEBUG [RS:2;4b7737f37de9:44673 {}] regionserver.HRegionServer(1351): Waiting on 3c4a0fab46c2c0263b38e45209989070 2024-12-10T16:34:59,481 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. after waiting 0 ms 2024-12-10T16:34:59,481 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 
2024-12-10T16:34:59,482 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-10T16:34:59,482 DEBUG [RS:1;4b7737f37de9:40043 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-10T16:34:59,482 DEBUG [RS:1;4b7737f37de9:40043 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-10T16:34:59,482 DEBUG [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-10T16:34:59,482 INFO [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-10T16:34:59,482 DEBUG [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-10T16:34:59,482 DEBUG [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T16:34:59,482 DEBUG [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T16:34:59,482 INFO [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=6.86 KB heapSize=11.45 KB 2024-12-10T16:34:59,486 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/default/testReplayEditsAfterRegionMovedWithMultiCF/3c4a0fab46c2c0263b38e45209989070/recovered.edits/20.seqid, newMaxSeqId=20, maxSeqId=17 2024-12-10T16:34:59,486 INFO [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 2024-12-10T16:34:59,487 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3c4a0fab46c2c0263b38e45209989070: Waiting for close lock at 1733848499481Running coprocessor pre-close hooks at 1733848499481Disabling compacts and flushes for region at 1733848499481Disabling writes for close at 1733848499481Writing region close event to WAL at 1733848499482 (+1 ms)Running coprocessor post-close hooks at 1733848499486 (+4 ms)Closed at 1733848499486 2024-12-10T16:34:59,487 DEBUG [RS_CLOSE_REGION-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070. 
2024-12-10T16:34:59,504 INFO [regionserver/4b7737f37de9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-10T16:34:59,504 INFO [regionserver/4b7737f37de9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-10T16:34:59,506 DEBUG [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740/.tmp/info/d4fde926663f484bbf839c9aed4a9134 is 205, key is testReplayEditsAfterRegionMovedWithMultiCF,,1733848476562.3c4a0fab46c2c0263b38e45209989070./info:regioninfo/1733848479641/Put/seqid=0 2024-12-10T16:34:59,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741938_1118 (size=8243) 2024-12-10T16:34:59,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741938_1118 (size=8243) 2024-12-10T16:34:59,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741938_1118 (size=8243) 2024-12-10T16:34:59,513 INFO [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.65 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740/.tmp/info/d4fde926663f484bbf839c9aed4a9134 2024-12-10T16:34:59,531 DEBUG [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740/.tmp/ns/a41d19532b034870b80bfa8fdf14dcf4 is 43, key is default/ns:d/1733848464189/Put/seqid=0 2024-12-10T16:34:59,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741939_1119 (size=5153) 2024-12-10T16:34:59,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741939_1119 (size=5153) 2024-12-10T16:34:59,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741939_1119 (size=5153) 2024-12-10T16:34:59,537 INFO [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740/.tmp/ns/a41d19532b034870b80bfa8fdf14dcf4 2024-12-10T16:34:59,555 DEBUG [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740/.tmp/table/273039d3972946f8987bd1d89c350098 is 78, key is testReplayEditsAfterRegionMovedWithMultiCF/table:state/1733848476982/Put/seqid=0 2024-12-10T16:34:59,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741940_1120 (size=5431) 2024-12-10T16:34:59,562 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741940_1120 (size=5431) 2024-12-10T16:34:59,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741940_1120 (size=5431) 2024-12-10T16:34:59,562 INFO [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=148 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740/.tmp/table/273039d3972946f8987bd1d89c350098 2024-12-10T16:34:59,568 DEBUG [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740/.tmp/info/d4fde926663f484bbf839c9aed4a9134 as hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740/info/d4fde926663f484bbf839c9aed4a9134 2024-12-10T16:34:59,568 INFO [regionserver/4b7737f37de9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T16:34:59,569 INFO [regionserver/4b7737f37de9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T16:34:59,573 INFO [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740/info/d4fde926663f484bbf839c9aed4a9134, entries=18, sequenceid=21, filesize=8.0 K 2024-12-10T16:34:59,574 DEBUG [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740/.tmp/ns/a41d19532b034870b80bfa8fdf14dcf4 as hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740/ns/a41d19532b034870b80bfa8fdf14dcf4 2024-12-10T16:34:59,579 INFO [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740/ns/a41d19532b034870b80bfa8fdf14dcf4, entries=2, sequenceid=21, filesize=5.0 K 2024-12-10T16:34:59,580 DEBUG [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740/.tmp/table/273039d3972946f8987bd1d89c350098 as hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740/table/273039d3972946f8987bd1d89c350098 2024-12-10T16:34:59,585 INFO [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740/table/273039d3972946f8987bd1d89c350098, entries=2, sequenceid=21, filesize=5.3 K 2024-12-10T16:34:59,586 INFO [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~6.86 KB/7029, heapSize ~11.16 KB/11424, currentSize=0 B/0 for 1588230740 in 104ms, 
sequenceid=21, compaction requested=false 2024-12-10T16:34:59,590 DEBUG [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-10T16:34:59,591 DEBUG [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-10T16:34:59,591 INFO [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-10T16:34:59,591 DEBUG [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733848499482Running coprocessor pre-close hooks at 1733848499482Disabling compacts and flushes for region at 1733848499482Disabling writes for close at 1733848499482Obtaining lock to block concurrent updates at 1733848499482Preparing flush snapshotting stores in 1588230740 at 1733848499482Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=7029, getHeapSize=11664, getOffHeapSize=0, getCellsCount=48 at 1733848499482Flushing stores of hbase:meta,,1.1588230740 at 1733848499483 (+1 ms)Flushing 1588230740/info: creating writer at 1733848499483Flushing 1588230740/info: appending metadata at 1733848499505 (+22 ms)Flushing 1588230740/info: closing flushed file at 1733848499505Flushing 1588230740/ns: creating writer at 1733848499518 (+13 ms)Flushing 1588230740/ns: appending metadata at 1733848499531 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1733848499531Flushing 1588230740/table: creating writer at 1733848499543 (+12 ms)Flushing 1588230740/table: appending metadata at 1733848499555 (+12 ms)Flushing 1588230740/table: closing flushed file at 1733848499555Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41b935f0: reopening flushed file at 1733848499567 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4045dd62: reopening flushed file at 1733848499573 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@72f50c2: reopening flushed file at 1733848499580 (+7 ms)Finished flush of dataSize ~6.86 KB/7029, heapSize ~11.16 KB/11424, currentSize=0 B/0 for 1588230740 in 104ms, sequenceid=21, compaction requested=false at 1733848499586 (+6 ms)Writing region close event to WAL at 1733848499587 (+1 ms)Running coprocessor post-close hooks at 1733848499591 (+4 ms)Closed at 1733848499591 2024-12-10T16:34:59,591 DEBUG [RS_CLOSE_META-regionserver/4b7737f37de9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-10T16:34:59,682 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.HRegionServer(976): stopping server 4b7737f37de9,44673,1733848461969; all regions closed. 2024-12-10T16:34:59,682 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.HRegionServer(976): stopping server 4b7737f37de9,40043,1733848461924; all regions closed. 
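Each "Region close journal" entry above packs the per-phase timings of a region close into one string: phase descriptions, epoch-millisecond timestamps, and optional "(+N ms)" deltas concatenated without separators. A small, purely illustrative parser for that format is sketched below; the class, method, and regex are assumptions for reading these log lines, not HBase code, and the sample input is an excerpt of the journal logged earlier in this output.

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical helper (not part of HBase): splits a "Region close journal" message
// into its phases, where each phase is "<description> at <epochMillis>[ (+N ms)]".
public final class CloseJournalReader {

  private static final Pattern PHASE =
      Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

  public static void print(String journal) {
    Matcher m = PHASE.matcher(journal);
    while (m.find()) {
      String delta = m.group(3) == null ? "+0 ms" : "+" + m.group(3) + " ms";
      System.out.printf("%-55s %s (%s)%n", m.group(1).trim(), m.group(2), delta);
    }
  }

  public static void main(String[] args) {
    // Excerpt of a close journal from the log above.
    print("Waiting for close lock at 1733848499424"
        + "Disabling compacts and flushes for region at 1733848499424"
        + "Disabling writes for close at 1733848499424"
        + "Writing region close event to WAL at 1733848499426 (+2 ms)"
        + "Closed at 1733848499426");
  }
}
```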
2024-12-10T16:34:59,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741836_1012 (size=4718) 2024-12-10T16:34:59,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741836_1012 (size=4718) 2024-12-10T16:34:59,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741836_1012 (size=4718) 2024-12-10T16:34:59,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741833_1009 (size=723) 2024-12-10T16:34:59,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741833_1009 (size=723) 2024-12-10T16:34:59,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741833_1009 (size=723) 2024-12-10T16:34:59,689 DEBUG [RS:2;4b7737f37de9:44673 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/oldWALs 2024-12-10T16:34:59,689 INFO [RS:2;4b7737f37de9:44673 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 4b7737f37de9%2C44673%2C1733848461969:(num 1733848463581) 2024-12-10T16:34:59,689 DEBUG [RS:2;4b7737f37de9:44673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T16:34:59,689 DEBUG [RS:1;4b7737f37de9:40043 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/oldWALs 2024-12-10T16:34:59,689 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T16:34:59,689 INFO [RS:1;4b7737f37de9:40043 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 4b7737f37de9%2C40043%2C1733848461924.meta:.meta(num 1733848464030) 2024-12-10T16:34:59,689 INFO [RS:2;4b7737f37de9:44673 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T16:34:59,689 INFO [RS:2;4b7737f37de9:44673 {}] hbase.ChoreService(370): Chore service for: regionserver/4b7737f37de9:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-10T16:34:59,689 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T16:34:59,689 INFO [regionserver/4b7737f37de9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-10T16:34:59,689 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T16:34:59,690 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-10T16:34:59,690 INFO [RS:2;4b7737f37de9:44673 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T16:34:59,690 INFO [RS:2;4b7737f37de9:44673 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44673 2024-12-10T16:34:59,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741834_1010 (size=1711) 2024-12-10T16:34:59,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741834_1010 (size=1711) 2024-12-10T16:34:59,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741834_1010 (size=1711) 2024-12-10T16:34:59,696 DEBUG [RS:1;4b7737f37de9:40043 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/oldWALs 2024-12-10T16:34:59,696 INFO [RS:1;4b7737f37de9:40043 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 4b7737f37de9%2C40043%2C1733848461924:(num 1733848463581) 2024-12-10T16:34:59,696 DEBUG [RS:1;4b7737f37de9:40043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T16:34:59,696 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T16:34:59,696 INFO [RS:1;4b7737f37de9:40043 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T16:34:59,696 INFO [RS:1;4b7737f37de9:40043 {}] hbase.ChoreService(370): Chore service for: regionserver/4b7737f37de9:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-10T16:34:59,696 INFO [RS:1;4b7737f37de9:40043 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T16:34:59,696 INFO [regionserver/4b7737f37de9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-10T16:34:59,696 INFO [RS:1;4b7737f37de9:40043 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40043 2024-12-10T16:34:59,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T16:34:59,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4b7737f37de9,44673,1733848461969 2024-12-10T16:34:59,700 INFO [RS:2;4b7737f37de9:44673 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T16:34:59,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40043-0x10010af1c860002, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4b7737f37de9,40043,1733848461924 2024-12-10T16:34:59,708 INFO [RS:1;4b7737f37de9:40043 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T16:34:59,708 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4b7737f37de9,40043,1733848461924] 2024-12-10T16:34:59,725 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4b7737f37de9,40043,1733848461924 already deleted, retry=false 2024-12-10T16:34:59,725 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4b7737f37de9,40043,1733848461924 expired; onlineServers=1 2024-12-10T16:34:59,725 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4b7737f37de9,44673,1733848461969] 2024-12-10T16:34:59,736 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4b7737f37de9,44673,1733848461969 already deleted, retry=false 2024-12-10T16:34:59,736 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4b7737f37de9,44673,1733848461969 expired; onlineServers=0 2024-12-10T16:34:59,736 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '4b7737f37de9,42829,1733848461149' ***** 2024-12-10T16:34:59,736 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-10T16:34:59,736 INFO [M:0;4b7737f37de9:42829 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T16:34:59,737 INFO [M:0;4b7737f37de9:42829 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T16:34:59,737 DEBUG [M:0;4b7737f37de9:42829 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-10T16:34:59,737 DEBUG [M:0;4b7737f37de9:42829 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-10T16:34:59,737 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-10T16:34:59,737 DEBUG [master/4b7737f37de9:0:becomeActiveMaster-HFileCleaner.large.0-1733848463231 {}] cleaner.HFileCleaner(306): Exit Thread[master/4b7737f37de9:0:becomeActiveMaster-HFileCleaner.large.0-1733848463231,5,FailOnTimeoutGroup] 2024-12-10T16:34:59,737 DEBUG [master/4b7737f37de9:0:becomeActiveMaster-HFileCleaner.small.0-1733848463232 {}] cleaner.HFileCleaner(306): Exit Thread[master/4b7737f37de9:0:becomeActiveMaster-HFileCleaner.small.0-1733848463232,5,FailOnTimeoutGroup] 2024-12-10T16:34:59,738 INFO [M:0;4b7737f37de9:42829 {}] hbase.ChoreService(370): Chore service for: master/4b7737f37de9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-10T16:34:59,738 INFO [M:0;4b7737f37de9:42829 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T16:34:59,738 DEBUG [M:0;4b7737f37de9:42829 {}] master.HMaster(1795): Stopping service threads 2024-12-10T16:34:59,738 INFO [M:0;4b7737f37de9:42829 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-10T16:34:59,738 INFO [M:0;4b7737f37de9:42829 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-10T16:34:59,739 INFO [M:0;4b7737f37de9:42829 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-10T16:34:59,740 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-10T16:34:59,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-10T16:34:59,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T16:34:59,745 DEBUG [M:0;4b7737f37de9:42829 {}] zookeeper.ZKUtil(347): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-10T16:34:59,745 WARN [M:0;4b7737f37de9:42829 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-10T16:34:59,746 INFO [M:0;4b7737f37de9:42829 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/.lastflushedseqids 2024-12-10T16:34:59,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741941_1121 (size=138) 2024-12-10T16:34:59,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741941_1121 (size=138) 2024-12-10T16:34:59,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741941_1121 (size=138) 2024-12-10T16:34:59,762 INFO [M:0;4b7737f37de9:42829 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-10T16:34:59,763 INFO [M:0;4b7737f37de9:42829 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-10T16:34:59,763 DEBUG 
[M:0;4b7737f37de9:42829 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T16:34:59,763 INFO [M:0;4b7737f37de9:42829 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T16:34:59,763 DEBUG [M:0;4b7737f37de9:42829 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T16:34:59,763 DEBUG [M:0;4b7737f37de9:42829 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-10T16:34:59,763 DEBUG [M:0;4b7737f37de9:42829 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T16:34:59,763 INFO [M:0;4b7737f37de9:42829 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=68.36 KB heapSize=83.73 KB 2024-12-10T16:34:59,782 DEBUG [M:0;4b7737f37de9:42829 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5d47616bc3fc428fa737e73c8d071a30 is 82, key is hbase:meta,,1/info:regioninfo/1733848464114/Put/seqid=0 2024-12-10T16:34:59,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741942_1122 (size=5672) 2024-12-10T16:34:59,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741942_1122 (size=5672) 2024-12-10T16:34:59,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741942_1122 (size=5672) 2024-12-10T16:34:59,788 INFO [M:0;4b7737f37de9:42829 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5d47616bc3fc428fa737e73c8d071a30 2024-12-10T16:34:59,805 DEBUG [M:0;4b7737f37de9:42829 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aa5b8c82ff6b4a7caf2fd1189e338bce is 1077, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733848476988/Put/seqid=0 2024-12-10T16:34:59,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741943_1123 (size=7756) 2024-12-10T16:34:59,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741943_1123 (size=7756) 2024-12-10T16:34:59,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741943_1123 (size=7756) 2024-12-10T16:34:59,812 INFO [M:0;4b7737f37de9:42829 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.62 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aa5b8c82ff6b4a7caf2fd1189e338bce 2024-12-10T16:34:59,817 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T16:34:59,817 INFO [RS:2;4b7737f37de9:44673 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T16:34:59,817 INFO [RS:2;4b7737f37de9:44673 {}] regionserver.HRegionServer(1031): Exiting; stopping=4b7737f37de9,44673,1733848461969; zookeeper connection closed. 2024-12-10T16:34:59,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x10010af1c860003, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T16:34:59,817 INFO [M:0;4b7737f37de9:42829 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for aa5b8c82ff6b4a7caf2fd1189e338bce 2024-12-10T16:34:59,817 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1c1e0ed7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1c1e0ed7 2024-12-10T16:34:59,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40043-0x10010af1c860002, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T16:34:59,825 INFO [RS:1;4b7737f37de9:40043 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T16:34:59,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40043-0x10010af1c860002, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T16:34:59,825 INFO [RS:1;4b7737f37de9:40043 {}] regionserver.HRegionServer(1031): Exiting; stopping=4b7737f37de9,40043,1733848461924; zookeeper connection closed. 
2024-12-10T16:34:59,826 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@fc9c32e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@fc9c32e 2024-12-10T16:34:59,826 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-10T16:34:59,834 DEBUG [M:0;4b7737f37de9:42829 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ec31ee08ab5d41eaa1fe61b668e9caca is 69, key is 4b7737f37de9,40043,1733848461924/rs:state/1733848463325/Put/seqid=0 2024-12-10T16:34:59,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741944_1124 (size=5440) 2024-12-10T16:34:59,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741944_1124 (size=5440) 2024-12-10T16:34:59,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741944_1124 (size=5440) 2024-12-10T16:34:59,841 INFO [M:0;4b7737f37de9:42829 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=249 B at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ec31ee08ab5d41eaa1fe61b668e9caca 2024-12-10T16:34:59,844 INFO [M:0;4b7737f37de9:42829 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ec31ee08ab5d41eaa1fe61b668e9caca 2024-12-10T16:34:59,845 DEBUG [M:0;4b7737f37de9:42829 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5d47616bc3fc428fa737e73c8d071a30 as hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5d47616bc3fc428fa737e73c8d071a30 2024-12-10T16:34:59,850 INFO [M:0;4b7737f37de9:42829 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5d47616bc3fc428fa737e73c8d071a30, entries=8, sequenceid=168, filesize=5.5 K 2024-12-10T16:34:59,851 DEBUG [M:0;4b7737f37de9:42829 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aa5b8c82ff6b4a7caf2fd1189e338bce as hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/aa5b8c82ff6b4a7caf2fd1189e338bce 2024-12-10T16:34:59,855 INFO [M:0;4b7737f37de9:42829 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for aa5b8c82ff6b4a7caf2fd1189e338bce 2024-12-10T16:34:59,855 INFO [M:0;4b7737f37de9:42829 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/aa5b8c82ff6b4a7caf2fd1189e338bce, entries=17, sequenceid=168, filesize=7.6 K 2024-12-10T16:34:59,856 DEBUG [M:0;4b7737f37de9:42829 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ec31ee08ab5d41eaa1fe61b668e9caca as hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ec31ee08ab5d41eaa1fe61b668e9caca 2024-12-10T16:34:59,861 INFO [M:0;4b7737f37de9:42829 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ec31ee08ab5d41eaa1fe61b668e9caca 2024-12-10T16:34:59,861 INFO [M:0;4b7737f37de9:42829 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35477/user/jenkins/test-data/f9e51fb6-55ec-d605-004f-84aec8d20d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ec31ee08ab5d41eaa1fe61b668e9caca, entries=3, sequenceid=168, filesize=5.3 K 2024-12-10T16:34:59,862 INFO [M:0;4b7737f37de9:42829 {}] regionserver.HRegion(3140): Finished flush of dataSize ~68.36 KB/69996, heapSize ~83.44 KB/85440, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 99ms, sequenceid=168, compaction requested=false 2024-12-10T16:34:59,863 INFO [M:0;4b7737f37de9:42829 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T16:34:59,863 DEBUG [M:0;4b7737f37de9:42829 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733848499763Disabling compacts and flushes for region at 1733848499763Disabling writes for close at 1733848499763Obtaining lock to block concurrent updates at 1733848499763Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733848499763Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=69996, getHeapSize=85680, getOffHeapSize=0, getCellsCount=195 at 1733848499764 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733848499764Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733848499764Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733848499781 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733848499781Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733848499792 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733848499805 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733848499805Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733848499817 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733848499834 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733848499834Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f99438e: reopening flushed file at 1733848499844 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@152f240f: reopening flushed file at 1733848499850 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73294feb: reopening flushed file at 1733848499856 (+6 ms)Finished flush of dataSize ~68.36 KB/69996, heapSize ~83.44 KB/85440, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 99ms, sequenceid=168, compaction requested=false at 1733848499862 (+6 ms)Writing region close event to WAL at 1733848499863 (+1 ms)Closed at 1733848499863 2024-12-10T16:34:59,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46873 is added to blk_1073741830_1006 (size=56557) 2024-12-10T16:34:59,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43913 is added to blk_1073741830_1006 (size=56557) 2024-12-10T16:34:59,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741830_1006 (size=56557) 2024-12-10T16:34:59,866 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-10T16:34:59,866 INFO [M:0;4b7737f37de9:42829 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-10T16:34:59,867 INFO [M:0;4b7737f37de9:42829 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42829 2024-12-10T16:34:59,867 INFO [M:0;4b7737f37de9:42829 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T16:34:59,975 INFO [M:0;4b7737f37de9:42829 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T16:34:59,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T16:34:59,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42829-0x10010af1c860000, quorum=127.0.0.1:53765, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T16:34:59,986 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994/wal.1733848480285 with renewLeaseKey: DEFAULT_16655 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994/wal.1733848480285 (inode 16655) Holder DFSClient_NONMAPREDUCE_545150750_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1733848479994/wal.1733848480285 (inode 16655) Holder DFSClient_NONMAPREDUCE_545150750_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-12-10T16:34:59,987 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1733848472168/wal.1733848472276 with renewLeaseKey: DEFAULT_16586 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T16:34:59,987 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1733848494889/wal.1733848495062 with renewLeaseKey: DEFAULT_16767 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T16:34:59,989 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408/wal.1733848489652 with renewLeaseKey: DEFAULT_16678 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408/wal.1733848489652 (inode 16678) Holder DFSClient_NONMAPREDUCE_545150750_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1733848480408/wal.1733848489652 (inode 16678) Holder DFSClient_NONMAPREDUCE_545150750_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-12-10T16:34:59,990 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1733848465287/wal.1733848465356 with renewLeaseKey: DEFAULT_16506 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T16:34:59,993 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847/wal.1733848490317 with renewLeaseKey: DEFAULT_16704 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847/wal.1733848490317 (inode 16704) Holder DFSClient_NONMAPREDUCE_545150750_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testdatalosswheninputerror-manual,16010,1733848489847/wal.1733848490317 (inode 16704) Holder DFSClient_NONMAPREDUCE_545150750_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-12-10T16:34:59,994 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1733848490481/wal.1733848490542 with renewLeaseKey: DEFAULT_16726 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T16:34:59,996 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal.1733848465163 with renewLeaseKey: DEFAULT_16485 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal.1733848465163 (inode 16485) Holder DFSClient_NONMAPREDUCE_545150750_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1733848464886/wal.1733848465163 (inode 16485) Holder DFSClient_NONMAPREDUCE_545150750_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-12-10T16:34:59,998 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal.1733848464648 with renewLeaseKey: DEFAULT_16462 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal.1733848464648 (inode 16462) Holder DFSClient_NONMAPREDUCE_545150750_22 does not have any open files. 
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1733848464414/wal.1733848464648 (inode 16462) Holder DFSClient_NONMAPREDUCE_545150750_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    ... 25 more
2024-12-10T16:35:00,001 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35f1150e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T16:35:00,003 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13a77e13{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T16:35:00,003 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T16:35:00,004 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f9972d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T16:35:00,004 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a6d5e13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/hadoop.log.dir/,STOPPED}
2024-12-10T16:35:00,006 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-10T16:35:00,006 WARN [BP-1758511473-172.17.0.3-1733848457790 heartbeating to localhost/127.0.0.1:35477 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-10T16:35:00,006 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-10T16:35:00,006 WARN [BP-1758511473-172.17.0.3-1733848457790 heartbeating to localhost/127.0.0.1:35477 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1758511473-172.17.0.3-1733848457790 (Datanode Uuid 75c93e06-6591-4732-9326-3e4c05ccc6c9) service to localhost/127.0.0.1:35477
2024-12-10T16:35:00,008 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/data/data5/current/BP-1758511473-172.17.0.3-1733848457790 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T16:35:00,008 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/data/data6/current/BP-1758511473-172.17.0.3-1733848457790 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T16:35:00,009 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-10T16:35:00,011 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7bd427b8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T16:35:00,011 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6915083f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T16:35:00,011 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T16:35:00,011 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cc2d6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T16:35:00,011 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46b092e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/hadoop.log.dir/,STOPPED}
2024-12-10T16:35:00,012 WARN [BP-1758511473-172.17.0.3-1733848457790 heartbeating to localhost/127.0.0.1:35477 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-10T16:35:00,012 WARN [BP-1758511473-172.17.0.3-1733848457790 heartbeating to localhost/127.0.0.1:35477 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1758511473-172.17.0.3-1733848457790 (Datanode Uuid 532bd787-74b5-4b08-b35e-f1360f5ce582) service to localhost/127.0.0.1:35477
2024-12-10T16:35:00,013 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/data/data3/current/BP-1758511473-172.17.0.3-1733848457790 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T16:35:00,013 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/data/data4/current/BP-1758511473-172.17.0.3-1733848457790 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T16:35:00,013 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-10T16:35:00,013 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-10T16:35:00,014 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-10T16:35:00,017 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@330740de{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T16:35:00,017 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7b24cab9{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T16:35:00,017 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T16:35:00,017 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a359997{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T16:35:00,017 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@cf5a85e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/hadoop.log.dir/,STOPPED}
2024-12-10T16:35:00,018 WARN [BP-1758511473-172.17.0.3-1733848457790 heartbeating to localhost/127.0.0.1:35477 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-10T16:35:00,018 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-10T16:35:00,018 WARN [BP-1758511473-172.17.0.3-1733848457790 heartbeating to localhost/127.0.0.1:35477 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1758511473-172.17.0.3-1733848457790 (Datanode Uuid 017dd4e6-ac3c-4432-a7b1-b0f7f2dba114) service to localhost/127.0.0.1:35477
2024-12-10T16:35:00,018 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-10T16:35:00,019 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/data/data1/current/BP-1758511473-172.17.0.3-1733848457790 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T16:35:00,019 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/cluster_2abc8474-d2bc-97ae-7082-3f3155e63df1/data/data2/current/BP-1758511473-172.17.0.3-1733848457790 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T16:35:00,019 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-10T16:35:00,025 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3717288f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-10T16:35:00,026 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4bd70930{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T16:35:00,026 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T16:35:00,026 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6dc9d5c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T16:35:00,026 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f37ffca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8fcc2af3-88db-5a1d-1956-6fa90523b9a1/hadoop.log.dir/,STOPPED}
2024-12-10T16:35:00,036 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-10T16:35:00,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down