2024-11-13 10:25:35,505 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a 2024-11-13 10:25:35,521 main DEBUG Took 0.013433 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-13 10:25:35,521 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-13 10:25:35,521 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-13 10:25:35,522 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-13 10:25:35,524 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 10:25:35,532 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-13 10:25:35,546 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 10:25:35,548 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 10:25:35,549 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 10:25:35,549 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 10:25:35,550 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 10:25:35,550 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 10:25:35,551 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 10:25:35,552 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 10:25:35,552 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 10:25:35,553 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 10:25:35,554 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 10:25:35,554 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 10:25:35,555 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 10:25:35,556 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-13 10:25:35,556 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 10:25:35,557 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 10:25:35,557 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 10:25:35,558 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 10:25:35,558 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 10:25:35,559 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 10:25:35,560 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 10:25:35,560 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 10:25:35,561 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 10:25:35,561 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 10:25:35,562 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 10:25:35,562 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-13 10:25:35,564 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 10:25:35,566 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-13 10:25:35,568 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-13 10:25:35,569 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-13 10:25:35,570 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-13 10:25:35,571 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-13 10:25:35,583 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-13 10:25:35,586 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-13 10:25:35,588 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-13 10:25:35,589 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-13 10:25:35,589 main DEBUG createAppenders(={Console}) 2024-11-13 10:25:35,590 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a initialized 2024-11-13 10:25:35,591 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a 2024-11-13 10:25:35,591 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a OK. 2024-11-13 10:25:35,592 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-13 10:25:35,592 main DEBUG OutputStream closed 2024-11-13 10:25:35,592 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-13 10:25:35,593 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-13 10:25:35,593 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@4310d43 OK 2024-11-13 10:25:35,661 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-13 10:25:35,663 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-13 10:25:35,664 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-13 10:25:35,666 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-13 10:25:35,666 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-13 10:25:35,667 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-13 10:25:35,667 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-13 10:25:35,668 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-13 10:25:35,668 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-13 10:25:35,669 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-13 10:25:35,669 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-13 10:25:35,670 main DEBUG Registering MBean 
org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-13 10:25:35,670 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-13 10:25:35,671 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-13 10:25:35,671 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-13 10:25:35,671 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-13 10:25:35,672 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-13 10:25:35,672 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-13 10:25:35,674 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-13 10:25:35,675 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@12f9af83) with optional ClassLoader: null 2024-11-13 10:25:35,675 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-13 10:25:35,676 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@12f9af83] started OK. 2024-11-13T10:25:35,933 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12 2024-11-13 10:25:35,936 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-13 10:25:35,937 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
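The StatusLogger entries above show Log4j2 assembling the test logging configuration from log4j2.properties inside the hbase-logging tests jar: per-package loggers (for example org.apache.hadoop at WARN, org.apache.hadoop.hbase at DEBUG, org.apache.zookeeper at ERROR), an INFO root logger routed to the Console appender, and a PatternLayout of %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n. The sketch below is only an illustration of an equivalent programmatic setup using Log4j2's ConfigurationBuilder API; it is not the configuration the test actually loads, and it substitutes the standard Console appender for HBase's custom HBaseTestAppender.

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.appender.ConsoleAppender;
    import org.apache.logging.log4j.core.config.Configurator;
    import org.apache.logging.log4j.core.config.builder.api.AppenderComponentBuilder;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
    import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

    // Illustrative only: the real test configuration comes from log4j2.properties
    // and uses org.apache.hadoop.hbase.logging.HBaseTestAppender.
    public final class TestLoggingSketch {
      public static void configure() {
        ConfigurationBuilder<BuiltConfiguration> builder =
            ConfigurationBuilderFactory.newConfigurationBuilder();

        // Console appender on stderr with the pattern reported by the StatusLogger.
        AppenderComponentBuilder console = builder
            .newAppender("Console", "Console")
            .addAttribute("target", ConsoleAppender.Target.SYSTEM_ERR)
            .add(builder.newLayout("PatternLayout")
                .addAttribute("pattern",
                    "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n"));
        builder.add(console);

        // A few of the per-package levels listed in createLoggers(...) above.
        builder.add(builder.newLogger("org.apache.hadoop", Level.WARN));
        builder.add(builder.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
        builder.add(builder.newLogger("org.apache.zookeeper", Level.ERROR));

        // Root logger: INFO, referencing the Console appender.
        builder.add(builder.newRootLogger(Level.INFO)
            .add(builder.newAppenderRef("Console")));

        Configurator.initialize(builder.build());
      }
    }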
2024-11-13T10:25:35,947 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay timeout: 13 mins 2024-11-13T10:25:35,953 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplayValueCompression timeout: 13 mins 2024-11-13T10:25:35,973 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-13T10:25:36,017 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-13T10:25:36,017 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-13T10:25:36,032 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T10:25:36,052 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5, deleteOnExit=true 2024-11-13T10:25:36,052 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T10:25:36,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/test.cache.data in system properties and HBase conf 2024-11-13T10:25:36,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T10:25:36,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/hadoop.log.dir in system properties and HBase conf 2024-11-13T10:25:36,055 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T10:25:36,055 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T10:25:36,056 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T10:25:36,150 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-13T10:25:36,260 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-13T10:25:36,265 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T10:25:36,266 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T10:25:36,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T10:25:36,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T10:25:36,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T10:25:36,269 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T10:25:36,270 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T10:25:36,270 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T10:25:36,271 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T10:25:36,272 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/nfs.dump.dir in system properties and HBase conf 2024-11-13T10:25:36,272 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/java.io.tmpdir in system properties and HBase conf 2024-11-13T10:25:36,273 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T10:25:36,273 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T10:25:36,274 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T10:25:37,204 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-13T10:25:37,302 INFO [Time-limited test {}] log.Log(170): Logging initialized @2623ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-13T10:25:37,399 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T10:25:37,489 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T10:25:37,519 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T10:25:37,519 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T10:25:37,521 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T10:25:37,539 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T10:25:37,542 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f37ffca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/hadoop.log.dir/,AVAILABLE} 2024-11-13T10:25:37,544 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6dc9d5c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T10:25:37,790 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3717288f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/java.io.tmpdir/jetty-localhost-38753-hadoop-hdfs-3_4_1-tests_jar-_-any-7013627310314822141/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T10:25:37,796 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4bd70930{HTTP/1.1, (http/1.1)}{localhost:38753} 2024-11-13T10:25:37,797 INFO [Time-limited test {}] server.Server(415): Started @3118ms 2024-11-13T10:25:38,246 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T10:25:38,256 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T10:25:38,258 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T10:25:38,258 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T10:25:38,258 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T10:25:38,259 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f76f489{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/hadoop.log.dir/,AVAILABLE} 2024-11-13T10:25:38,260 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@433df981{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T10:25:38,383 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@36632d60{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/java.io.tmpdir/jetty-localhost-40909-hadoop-hdfs-3_4_1-tests_jar-_-any-80435838426693282/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T10:25:38,384 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@751d2fa4{HTTP/1.1, (http/1.1)}{localhost:40909} 2024-11-13T10:25:38,384 INFO [Time-limited test {}] server.Server(415): Started @3706ms 2024-11-13T10:25:38,445 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T10:25:38,622 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T10:25:38,630 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T10:25:38,632 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T10:25:38,632 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T10:25:38,632 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T10:25:38,636 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b5fc47c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/hadoop.log.dir/,AVAILABLE} 2024-11-13T10:25:38,637 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ebbf344{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T10:25:38,807 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4546bb60{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/java.io.tmpdir/jetty-localhost-34297-hadoop-hdfs-3_4_1-tests_jar-_-any-16652368465806931821/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T10:25:38,808 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7c7d32f8{HTTP/1.1, (http/1.1)}{localhost:34297} 2024-11-13T10:25:38,809 INFO [Time-limited test {}] server.Server(415): Started @4130ms 2024-11-13T10:25:38,811 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T10:25:38,874 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T10:25:38,879 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T10:25:38,881 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T10:25:38,881 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T10:25:38,882 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T10:25:38,885 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7da22a2e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/hadoop.log.dir/,AVAILABLE} 2024-11-13T10:25:38,886 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f079a76{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T10:25:39,006 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/data/data3/current/BP-357896810-172.17.0.2-1731493536919/current, will proceed with Du for space computation calculation, 2024-11-13T10:25:39,006 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/data/data4/current/BP-357896810-172.17.0.2-1731493536919/current, will proceed with Du for space computation calculation, 2024-11-13T10:25:39,006 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/data/data1/current/BP-357896810-172.17.0.2-1731493536919/current, will proceed with Du for space computation calculation, 2024-11-13T10:25:39,008 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/data/data2/current/BP-357896810-172.17.0.2-1731493536919/current, will proceed with Du for space computation calculation, 2024-11-13T10:25:39,045 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43206bef{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/java.io.tmpdir/jetty-localhost-34397-hadoop-hdfs-3_4_1-tests_jar-_-any-13944706514126727615/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T10:25:39,046 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@228ffa29{HTTP/1.1, 
(http/1.1)}{localhost:34397} 2024-11-13T10:25:39,047 INFO [Time-limited test {}] server.Server(415): Started @4368ms 2024-11-13T10:25:39,049 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T10:25:39,067 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T10:25:39,068 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T10:25:39,158 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf966cf6c3abca88f with lease ID 0x6a98361ec6a2fbfb: Processing first storage report for DS-2ac43560-8e20-498a-852c-1b3a1f0157e9 from datanode DatanodeRegistration(127.0.0.1:45097, datanodeUuid=74f818aa-bde0-444c-be58-669bd3a13313, infoPort=42913, infoSecurePort=0, ipcPort=35595, storageInfo=lv=-57;cid=testClusterID;nsid=530937701;c=1731493536919) 2024-11-13T10:25:39,159 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf966cf6c3abca88f with lease ID 0x6a98361ec6a2fbfb: from storage DS-2ac43560-8e20-498a-852c-1b3a1f0157e9 node DatanodeRegistration(127.0.0.1:45097, datanodeUuid=74f818aa-bde0-444c-be58-669bd3a13313, infoPort=42913, infoSecurePort=0, ipcPort=35595, storageInfo=lv=-57;cid=testClusterID;nsid=530937701;c=1731493536919), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-13T10:25:39,160 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf6df64bd0da9f537 with lease ID 0x6a98361ec6a2fbfc: Processing first storage report for DS-1af27950-119b-421a-9580-8a6fa7d1ebb0 from datanode DatanodeRegistration(127.0.0.1:44787, datanodeUuid=2aff3888-7015-4ad7-9cba-7d6b1e8abc2a, infoPort=35745, infoSecurePort=0, ipcPort=43163, storageInfo=lv=-57;cid=testClusterID;nsid=530937701;c=1731493536919) 2024-11-13T10:25:39,160 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf6df64bd0da9f537 with lease ID 0x6a98361ec6a2fbfc: from storage DS-1af27950-119b-421a-9580-8a6fa7d1ebb0 node DatanodeRegistration(127.0.0.1:44787, datanodeUuid=2aff3888-7015-4ad7-9cba-7d6b1e8abc2a, infoPort=35745, infoSecurePort=0, ipcPort=43163, storageInfo=lv=-57;cid=testClusterID;nsid=530937701;c=1731493536919), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T10:25:39,160 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf966cf6c3abca88f with lease ID 0x6a98361ec6a2fbfb: Processing first storage report for DS-4e4bf74c-a7ad-42ec-b41c-4e47e5157934 from datanode DatanodeRegistration(127.0.0.1:45097, datanodeUuid=74f818aa-bde0-444c-be58-669bd3a13313, infoPort=42913, infoSecurePort=0, ipcPort=35595, storageInfo=lv=-57;cid=testClusterID;nsid=530937701;c=1731493536919) 2024-11-13T10:25:39,161 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf966cf6c3abca88f with lease ID 0x6a98361ec6a2fbfb: from storage DS-4e4bf74c-a7ad-42ec-b41c-4e47e5157934 node DatanodeRegistration(127.0.0.1:45097, datanodeUuid=74f818aa-bde0-444c-be58-669bd3a13313, infoPort=42913, infoSecurePort=0, ipcPort=35595, storageInfo=lv=-57;cid=testClusterID;nsid=530937701;c=1731493536919), blocks: 0, hasStaleStorage: false, 
processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T10:25:39,161 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf6df64bd0da9f537 with lease ID 0x6a98361ec6a2fbfc: Processing first storage report for DS-6b7f7be9-7b95-4b3b-82c3-b088766b5d60 from datanode DatanodeRegistration(127.0.0.1:44787, datanodeUuid=2aff3888-7015-4ad7-9cba-7d6b1e8abc2a, infoPort=35745, infoSecurePort=0, ipcPort=43163, storageInfo=lv=-57;cid=testClusterID;nsid=530937701;c=1731493536919) 2024-11-13T10:25:39,161 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf6df64bd0da9f537 with lease ID 0x6a98361ec6a2fbfc: from storage DS-6b7f7be9-7b95-4b3b-82c3-b088766b5d60 node DatanodeRegistration(127.0.0.1:44787, datanodeUuid=2aff3888-7015-4ad7-9cba-7d6b1e8abc2a, infoPort=35745, infoSecurePort=0, ipcPort=43163, storageInfo=lv=-57;cid=testClusterID;nsid=530937701;c=1731493536919), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T10:25:39,215 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/data/data5/current/BP-357896810-172.17.0.2-1731493536919/current, will proceed with Du for space computation calculation, 2024-11-13T10:25:39,217 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/data/data6/current/BP-357896810-172.17.0.2-1731493536919/current, will proceed with Du for space computation calculation, 2024-11-13T10:25:39,249 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T10:25:39,258 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x35ae74da15948b91 with lease ID 0x6a98361ec6a2fbfd: Processing first storage report for DS-4caa984b-780d-4d07-9178-6a891c1e8e45 from datanode DatanodeRegistration(127.0.0.1:38649, datanodeUuid=65e57e12-06ba-4c67-8670-361c7b675aed, infoPort=45409, infoSecurePort=0, ipcPort=42871, storageInfo=lv=-57;cid=testClusterID;nsid=530937701;c=1731493536919) 2024-11-13T10:25:39,258 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x35ae74da15948b91 with lease ID 0x6a98361ec6a2fbfd: from storage DS-4caa984b-780d-4d07-9178-6a891c1e8e45 node DatanodeRegistration(127.0.0.1:38649, datanodeUuid=65e57e12-06ba-4c67-8670-361c7b675aed, infoPort=45409, infoSecurePort=0, ipcPort=42871, storageInfo=lv=-57;cid=testClusterID;nsid=530937701;c=1731493536919), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T10:25:39,259 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x35ae74da15948b91 with lease ID 0x6a98361ec6a2fbfd: Processing first storage report for DS-959160a0-4209-4211-bede-10e91b06594b from datanode DatanodeRegistration(127.0.0.1:38649, datanodeUuid=65e57e12-06ba-4c67-8670-361c7b675aed, infoPort=45409, infoSecurePort=0, ipcPort=42871, storageInfo=lv=-57;cid=testClusterID;nsid=530937701;c=1731493536919) 2024-11-13T10:25:39,259 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x35ae74da15948b91 with lease ID 0x6a98361ec6a2fbfd: from storage DS-959160a0-4209-4211-bede-10e91b06594b node DatanodeRegistration(127.0.0.1:38649, datanodeUuid=65e57e12-06ba-4c67-8670-361c7b675aed, infoPort=45409, infoSecurePort=0, ipcPort=42871, storageInfo=lv=-57;cid=testClusterID;nsid=530937701;c=1731493536919), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T10:25:39,507 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12 2024-11-13T10:25:39,634 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/zookeeper_0, clientPort=51925, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T10:25:39,648 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51925 2024-11-13T10:25:39,693 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T10:25:39,698 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T10:25:39,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741825_1001 (size=7) 2024-11-13T10:25:39,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741825_1001 (size=7) 2024-11-13T10:25:39,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741825_1001 (size=7) 2024-11-13T10:25:40,378 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510 with version=8 2024-11-13T10:25:40,378 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/hbase-staging 2024-11-13T10:25:40,792 INFO [Time-limited test {}] client.ConnectionUtils(128): master/770665a7984d:0 server-side Connection retries=45 2024-11-13T10:25:40,804 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T10:25:40,805 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T10:25:40,810 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T10:25:40,810 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T10:25:40,810 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T10:25:40,995 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T10:25:41,067 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-13T10:25:41,077 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-13T10:25:41,081 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T10:25:41,112 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 5068 (auto-detected) 2024-11-13T10:25:41,114 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-13T10:25:41,139 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45401 2024-11-13T10:25:41,163 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process 
identifier=master:45401 connecting to ZooKeeper ensemble=127.0.0.1:51925 2024-11-13T10:25:41,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:454010x0, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T10:25:41,206 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45401-0x10110dc99880000 connected 2024-11-13T10:25:41,245 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T10:25:41,249 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T10:25:41,264 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T10:25:41,269 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510, hbase.cluster.distributed=false 2024-11-13T10:25:41,302 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T10:25:41,315 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45401 2024-11-13T10:25:41,320 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45401 2024-11-13T10:25:41,341 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45401 2024-11-13T10:25:41,346 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45401 2024-11-13T10:25:41,346 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45401 2024-11-13T10:25:41,488 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/770665a7984d:0 server-side Connection retries=45 2024-11-13T10:25:41,490 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T10:25:41,490 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T10:25:41,490 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T10:25:41,490 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T10:25:41,490 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 
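Up to this point the log records HBaseTestingUtil bringing up the mini cluster requested by the test: StartMiniClusterOption{numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1}, a MiniZooKeeperCluster on client port 51925, a three-datanode mini DFS, and the master binding its Netty RPC server on port 45401 and connecting to ZooKeeper. As a rough illustration only (assuming the HBase 3.x HBaseTestingUtil / StartMiniClusterOption test API; the class and option names are taken from the log, the test body is hypothetical), a test typically drives this setup roughly as follows:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    // Hypothetical scaffold mirroring the startup recorded above; not the actual
    // TestAsyncWALReplay source.
    public class MiniClusterSketch {
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      public static void main(String[] args) throws Exception {
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)        // one HMaster, as in the log (bound to port 45401 here)
            .numRegionServers(3)  // three region servers (44657, 36821, 46143 here)
            .numDataNodes(3)      // three HDFS datanodes backing WALs and HFiles
            .numZkServers(1)      // single MiniZooKeeperCluster node
            .build();
        UTIL.startMiniCluster(option);   // starts DFS, ZooKeeper, master, region servers
        try {
          // ... run WAL replay assertions against the mini cluster ...
        } finally {
          UTIL.shutdownMiniCluster();    // tears everything down and removes test dirs
        }
      }
    }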
2024-11-13T10:25:41,493 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T10:25:41,495 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T10:25:41,496 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44657 2024-11-13T10:25:41,498 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44657 connecting to ZooKeeper ensemble=127.0.0.1:51925 2024-11-13T10:25:41,499 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T10:25:41,502 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T10:25:41,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:446570x0, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T10:25:41,509 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44657-0x10110dc99880001 connected 2024-11-13T10:25:41,509 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44657-0x10110dc99880001, quorum=127.0.0.1:51925, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T10:25:41,513 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T10:25:41,524 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T10:25:41,528 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44657-0x10110dc99880001, quorum=127.0.0.1:51925, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T10:25:41,535 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44657-0x10110dc99880001, quorum=127.0.0.1:51925, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T10:25:41,536 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44657 2024-11-13T10:25:41,536 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44657 2024-11-13T10:25:41,537 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44657 2024-11-13T10:25:41,538 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44657 2024-11-13T10:25:41,538 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44657 2024-11-13T10:25:41,562 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/770665a7984d:0 server-side Connection retries=45 2024-11-13T10:25:41,563 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T10:25:41,563 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T10:25:41,563 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T10:25:41,564 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T10:25:41,564 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T10:25:41,564 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T10:25:41,564 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T10:25:41,566 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36821 2024-11-13T10:25:41,568 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36821 connecting to ZooKeeper ensemble=127.0.0.1:51925 2024-11-13T10:25:41,569 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T10:25:41,572 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T10:25:41,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:368210x0, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T10:25:41,580 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36821-0x10110dc99880002 connected 2024-11-13T10:25:41,580 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T10:25:41,581 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T10:25:41,584 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T10:25:41,585 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T10:25:41,588 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T10:25:41,590 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36821 
2024-11-13T10:25:41,591 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36821 2024-11-13T10:25:41,592 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36821 2024-11-13T10:25:41,600 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36821 2024-11-13T10:25:41,601 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36821 2024-11-13T10:25:41,628 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/770665a7984d:0 server-side Connection retries=45 2024-11-13T10:25:41,628 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T10:25:41,628 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T10:25:41,629 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T10:25:41,629 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T10:25:41,629 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T10:25:41,629 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T10:25:41,630 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T10:25:41,631 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46143 2024-11-13T10:25:41,633 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46143 connecting to ZooKeeper ensemble=127.0.0.1:51925 2024-11-13T10:25:41,635 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T10:25:41,638 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T10:25:41,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:461430x0, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T10:25:41,646 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:461430x0, quorum=127.0.0.1:51925, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T10:25:41,646 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache 
size=880 MB, blockSize=64 KB 2024-11-13T10:25:41,649 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46143-0x10110dc99880003 connected 2024-11-13T10:25:41,650 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T10:25:41,651 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T10:25:41,654 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T10:25:41,657 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46143 2024-11-13T10:25:41,657 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46143 2024-11-13T10:25:41,660 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46143 2024-11-13T10:25:41,668 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46143 2024-11-13T10:25:41,668 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46143 2024-11-13T10:25:41,695 DEBUG [M:0;770665a7984d:45401 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;770665a7984d:45401 2024-11-13T10:25:41,696 INFO [master/770665a7984d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/770665a7984d,45401,1731493540547 2024-11-13T10:25:41,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44657-0x10110dc99880001, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T10:25:41,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T10:25:41,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T10:25:41,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T10:25:41,715 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/770665a7984d,45401,1731493540547 2024-11-13T10:25:41,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T10:25:41,744 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T10:25:41,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:41,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:41,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:41,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44657-0x10110dc99880001, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T10:25:41,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44657-0x10110dc99880001, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:41,748 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T10:25:41,750 INFO [master/770665a7984d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/770665a7984d,45401,1731493540547 from backup master directory 2024-11-13T10:25:41,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/770665a7984d,45401,1731493540547 2024-11-13T10:25:41,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T10:25:41,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T10:25:41,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44657-0x10110dc99880001, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T10:25:41,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T10:25:41,756 WARN [master/770665a7984d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
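The ZKWatcher lines above show each server registering watches on znodes such as /hbase/master and /hbase/backup-masters and then receiving NodeCreated, NodeChildrenChanged, and NodeDeleted events as the active master claims its znode. Below is a minimal sketch of that watch-then-notify pattern using the plain ZooKeeper client; the ensemble address and znode path are taken from the log, everything else is illustrative and is not HBase's ZKUtil/ZKWatcher code.

    // Sketch only: set a watch on a znode that may not exist yet, as in the
    // "Set watcher on znode that does not yet exist" lines above.
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class WatchExample {
        public static void main(String[] args) throws Exception {
            Watcher watcher = (WatchedEvent event) ->
                    System.out.println("Received ZooKeeper Event, type=" + event.getType()
                            + ", state=" + event.getState() + ", path=" + event.getPath());
            // Ensemble address as reported in the log; session timeout is arbitrary here.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:51925", 30_000, watcher);
            // exists() registers the watch whether or not the node is present, so a
            // later NodeCreated event for /hbase/master will still be delivered.
            zk.exists("/hbase/master", watcher);
            Thread.sleep(60_000);   // keep the session alive long enough to observe events
            zk.close();
        }
    }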
2024-11-13T10:25:41,756 INFO [master/770665a7984d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=770665a7984d,45401,1731493540547 2024-11-13T10:25:41,759 INFO [master/770665a7984d:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-13T10:25:41,761 INFO [master/770665a7984d:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-13T10:25:41,831 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/hbase.id] with ID: b9fd5f8e-ec8b-4cb8-95d4-350ee72f0575 2024-11-13T10:25:41,832 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/.tmp/hbase.id 2024-11-13T10:25:41,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741826_1002 (size=42) 2024-11-13T10:25:41,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741826_1002 (size=42) 2024-11-13T10:25:41,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741826_1002 (size=42) 2024-11-13T10:25:41,860 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/.tmp/hbase.id]:[hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/hbase.id] 2024-11-13T10:25:41,999 INFO [master/770665a7984d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T10:25:42,003 INFO [master/770665a7984d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T10:25:42,025 INFO [master/770665a7984d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 19ms. 
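The FSUtils lines above record the cluster ID being written to a temporary file (.tmp/hbase.id) and then moved to its final location (hbase.id). A minimal sketch of that write-to-temp-then-rename pattern with the Hadoop FileSystem API follows; the NameNode address and ID value are taken from the log, the paths are abbreviated, and the real FSUtils adds retries and error handling that are omitted here.

    // Sketch of "write to a temporary location, then rename into place" on HDFS.
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://localhost:41249");   // NameNode address from the log
            FileSystem fs = FileSystem.get(conf);

            Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");  // abbreviated paths
            Path dst = new Path("/user/jenkins/test-data/hbase.id");

            // Write the ID to the temporary location first ...
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.write("b9fd5f8e-ec8b-4cb8-95d4-350ee72f0575".getBytes(StandardCharsets.UTF_8));
            }
            // ... then rename it to the target location, as the log describes.
            if (!fs.rename(tmp, dst)) {
                throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
            }
        }
    }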
2024-11-13T10:25:42,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:42,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:42,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44657-0x10110dc99880001, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:42,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:42,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741827_1003 (size=196) 2024-11-13T10:25:42,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741827_1003 (size=196) 2024-11-13T10:25:42,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741827_1003 (size=196) 2024-11-13T10:25:42,082 INFO [master/770665a7984d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T10:25:42,085 INFO [master/770665a7984d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T10:25:42,092 INFO [master/770665a7984d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:25:42,121 WARN [IPC Server handler 0 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed 
to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T10:25:42,122 WARN [IPC Server handler 0 on default port 41249 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T10:25:42,122 WARN [IPC Server handler 0 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T10:25:42,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741828_1004 (size=1189) 2024-11-13T10:25:42,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741828_1004 (size=1189) 2024-11-13T10:25:42,158 INFO [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/data/master/store 2024-11-13T10:25:42,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to 
blk_1073741829_1005 (size=34) 2024-11-13T10:25:42,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741829_1005 (size=34) 2024-11-13T10:25:42,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741829_1005 (size=34) 2024-11-13T10:25:42,203 INFO [master/770665a7984d:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-13T10:25:42,207 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:25:42,208 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T10:25:42,209 INFO [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T10:25:42,209 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T10:25:42,211 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T10:25:42,211 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T10:25:42,211 INFO [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
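The StoreHotnessProtector line above notes that the protector is disabled and can be enabled by setting hbase.region.store.parallel.put.limit to a value greater than 0. A hedged sketch of doing that through an HBase Configuration is shown below; the value 10 is an arbitrary example, not a recommendation.

    // Sketch: enable the StoreHotnessProtector that the log reports as disabled.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HotnessProtectorConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // > 0 enables the protector; 0 leaves it disabled, as seen above.
            conf.setInt("hbase.region.store.parallel.put.limit", 10);
            System.out.println(conf.getInt("hbase.region.store.parallel.put.limit", 0));
        }
    }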
2024-11-13T10:25:42,213 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731493542208Disabling compacts and flushes for region at 1731493542208Disabling writes for close at 1731493542211 (+3 ms)Writing region close event to WAL at 1731493542211Closed at 1731493542211 2024-11-13T10:25:42,219 WARN [master/770665a7984d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/data/master/store/.initializing 2024-11-13T10:25:42,219 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/WALs/770665a7984d,45401,1731493540547 2024-11-13T10:25:42,231 INFO [master/770665a7984d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T10:25:42,253 INFO [master/770665a7984d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=770665a7984d%2C45401%2C1731493540547, suffix=, logDir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/WALs/770665a7984d,45401,1731493540547, archiveDir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/oldWALs, maxLogs=10 2024-11-13T10:25:42,301 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/WALs/770665a7984d,45401,1731493540547/770665a7984d%2C45401%2C1731493540547.1731493542261, exclude list is [], retry=0 2024-11-13T10:25:42,307 WARN [IPC Server handler 2 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T10:25:42,307 WARN [IPC Server handler 2 on default port 41249 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T10:25:42,307 WARN [IPC Server handler 2 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T10:25:42,336 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:25:42,342 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:25:42,342 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:25:42,346 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-13T10:25:42,395 INFO [master/770665a7984d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/WALs/770665a7984d,45401,1731493540547/770665a7984d%2C45401%2C1731493540547.1731493542261 2024-11-13T10:25:42,397 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:45409:45409)] 2024-11-13T10:25:42,397 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:25:42,399 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:25:42,403 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T10:25:42,404 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T10:25:42,453 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T10:25:42,487 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T10:25:42,492 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:42,499 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T10:25:42,503 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T10:25:42,507 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T10:25:42,508 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:42,509 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:42,509 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T10:25:42,512 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T10:25:42,513 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:42,514 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:42,514 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T10:25:42,517 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T10:25:42,517 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:42,518 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:42,519 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T10:25:42,522 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T10:25:42,524 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T10:25:42,531 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T10:25:42,532 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T10:25:42,536 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): 
No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T10:25:42,540 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T10:25:42,546 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T10:25:42,547 INFO [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60241651, jitterRate=-0.10232944786548615}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T10:25:42,554 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731493542422Initializing all the Stores at 1731493542425 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731493542426 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493542427 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493542428 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493542428Cleaning up temporary data from old regions at 1731493542532 (+104 ms)Region opened successfully at 1731493542554 (+22 ms) 2024-11-13T10:25:42,555 INFO [master/770665a7984d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T10:25:42,596 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f26fe25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=770665a7984d/172.17.0.2:0 2024-11-13T10:25:42,633 INFO 
[master/770665a7984d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T10:25:42,647 INFO [master/770665a7984d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T10:25:42,648 INFO [master/770665a7984d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T10:25:42,651 INFO [master/770665a7984d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T10:25:42,652 INFO [master/770665a7984d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-13T10:25:42,659 INFO [master/770665a7984d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 6 msec 2024-11-13T10:25:42,659 INFO [master/770665a7984d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T10:25:42,690 INFO [master/770665a7984d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T10:25:42,703 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T10:25:42,706 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T10:25:42,710 INFO [master/770665a7984d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T10:25:42,712 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T10:25:42,714 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T10:25:42,717 INFO [master/770665a7984d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T10:25:42,723 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T10:25:42,724 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T10:25:42,726 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T10:25:42,727 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T10:25:42,752 DEBUG 
[master/770665a7984d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T10:25:42,756 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T10:25:42,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T10:25:42,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T10:25:42,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44657-0x10110dc99880001, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T10:25:42,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44657-0x10110dc99880001, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:42,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:42,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T10:25:42,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:42,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:42,767 INFO [master/770665a7984d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=770665a7984d,45401,1731493540547, sessionid=0x10110dc99880000, setting cluster-up flag (Was=false) 2024-11-13T10:25:42,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:42,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:42,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:42,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44657-0x10110dc99880001, 
quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:42,790 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T10:25:42,792 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=770665a7984d,45401,1731493540547 2024-11-13T10:25:42,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:42,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:42,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44657-0x10110dc99880001, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:42,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:42,807 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T10:25:42,809 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=770665a7984d,45401,1731493540547 2024-11-13T10:25:42,818 INFO [master/770665a7984d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T10:25:42,882 INFO [RS:1;770665a7984d:36821 {}] regionserver.HRegionServer(746): ClusterId : b9fd5f8e-ec8b-4cb8-95d4-350ee72f0575 2024-11-13T10:25:42,882 INFO [RS:0;770665a7984d:44657 {}] regionserver.HRegionServer(746): ClusterId : b9fd5f8e-ec8b-4cb8-95d4-350ee72f0575 2024-11-13T10:25:42,882 INFO [RS:2;770665a7984d:46143 {}] regionserver.HRegionServer(746): ClusterId : b9fd5f8e-ec8b-4cb8-95d4-350ee72f0575 2024-11-13T10:25:42,886 DEBUG [RS:1;770665a7984d:36821 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T10:25:42,888 DEBUG [RS:0;770665a7984d:44657 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T10:25:42,888 DEBUG [RS:2;770665a7984d:46143 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T10:25:42,893 DEBUG [RS:0;770665a7984d:44657 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T10:25:42,893 DEBUG [RS:1;770665a7984d:36821 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T10:25:42,893 DEBUG [RS:0;770665a7984d:44657 {}] procedure.RegionServerProcedureManagerHost(43): 
Procedure online-snapshot initializing 2024-11-13T10:25:42,894 DEBUG [RS:1;770665a7984d:36821 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T10:25:42,898 DEBUG [RS:1;770665a7984d:36821 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T10:25:42,898 DEBUG [RS:0;770665a7984d:44657 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T10:25:42,898 DEBUG [RS:0;770665a7984d:44657 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f87cf7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=770665a7984d/172.17.0.2:0 2024-11-13T10:25:42,899 DEBUG [RS:2;770665a7984d:46143 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T10:25:42,899 DEBUG [RS:2;770665a7984d:46143 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T10:25:42,904 DEBUG [RS:1;770665a7984d:36821 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54f87d3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=770665a7984d/172.17.0.2:0 2024-11-13T10:25:42,919 DEBUG [RS:2;770665a7984d:46143 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T10:25:42,920 DEBUG [RS:2;770665a7984d:46143 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7069cf43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=770665a7984d/172.17.0.2:0 2024-11-13T10:25:42,931 DEBUG [RS:1;770665a7984d:36821 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;770665a7984d:36821 2024-11-13T10:25:42,936 INFO [RS:1;770665a7984d:36821 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T10:25:42,937 INFO [RS:1;770665a7984d:36821 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T10:25:42,937 DEBUG [RS:1;770665a7984d:36821 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-13T10:25:42,941 INFO [RS:1;770665a7984d:36821 {}] regionserver.HRegionServer(2659): reportForDuty to master=770665a7984d,45401,1731493540547 with port=36821, startcode=1731493541562 2024-11-13T10:25:42,943 DEBUG [RS:0;770665a7984d:44657 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;770665a7984d:44657 2024-11-13T10:25:42,944 INFO [RS:0;770665a7984d:44657 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T10:25:42,944 INFO [RS:0;770665a7984d:44657 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T10:25:42,944 DEBUG [RS:0;770665a7984d:44657 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-13T10:25:42,957 INFO [RS:0;770665a7984d:44657 {}] regionserver.HRegionServer(2659): reportForDuty to master=770665a7984d,45401,1731493540547 with port=44657, startcode=1731493541444 2024-11-13T10:25:42,961 DEBUG [RS:2;770665a7984d:46143 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;770665a7984d:46143 2024-11-13T10:25:42,962 INFO [RS:2;770665a7984d:46143 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T10:25:42,962 INFO [RS:2;770665a7984d:46143 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T10:25:42,962 DEBUG [RS:2;770665a7984d:46143 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-13T10:25:42,962 DEBUG [RS:1;770665a7984d:36821 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T10:25:42,964 INFO [RS:2;770665a7984d:46143 {}] regionserver.HRegionServer(2659): reportForDuty to master=770665a7984d,45401,1731493540547 with port=46143, startcode=1731493541627 2024-11-13T10:25:42,964 DEBUG [RS:0;770665a7984d:44657 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T10:25:42,964 DEBUG [RS:2;770665a7984d:46143 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T10:25:42,967 INFO [AsyncFSWAL-0-hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData-prefix:770665a7984d,45401,1731493540547 {}] compress.Compression(560): Loaded codec org.apache.hadoop.hbase.io.compress.ReusableStreamGzipCodec for compression algorithm GZ 2024-11-13T10:25:43,010 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T10:25:43,020 INFO [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44619, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T10:25:43,020 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34083, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T10:25:43,020 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52789, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T10:25:43,028 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45401 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-13T10:25:43,031 INFO [master/770665a7984d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T10:25:43,035 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45401 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-13T10:25:43,036 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45401 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-13T10:25:43,042 INFO [master/770665a7984d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-13T10:25:43,051 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 770665a7984d,45401,1731493540547 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T10:25:43,065 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/770665a7984d:0, corePoolSize=5, maxPoolSize=5 2024-11-13T10:25:43,066 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/770665a7984d:0, corePoolSize=5, maxPoolSize=5 2024-11-13T10:25:43,066 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/770665a7984d:0, corePoolSize=5, maxPoolSize=5 2024-11-13T10:25:43,066 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/770665a7984d:0, corePoolSize=5, maxPoolSize=5 2024-11-13T10:25:43,066 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/770665a7984d:0, corePoolSize=10, maxPoolSize=10 2024-11-13T10:25:43,066 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,067 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/770665a7984d:0, corePoolSize=2, maxPoolSize=2 2024-11-13T10:25:43,067 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,075 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T10:25:43,075 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T10:25:43,076 DEBUG [RS:0;770665a7984d:44657 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-13T10:25:43,076 DEBUG [RS:2;770665a7984d:46143 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-13T10:25:43,076 DEBUG [RS:1;770665a7984d:36821 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-13T10:25:43,076 WARN [RS:0;770665a7984d:44657 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-13T10:25:43,076 WARN [RS:1;770665a7984d:36821 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-13T10:25:43,076 WARN [RS:2;770665a7984d:46143 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
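Note: the ServerNotRunningYetException stack traces and the "reportForDuty failed; sleeping 100 ms and then retrying" warnings above are a benign startup race: the region servers call the master before its RPC services are fully up, then back off and try again. An illustrative retry-with-sleep shape (not HBase's actual implementation) under that assumption:

    // Illustrative only: the retry pattern visible in the log, not HBase source.
    public final class ReportForDutyRetrySketch {
      interface Registration {
        boolean tryRegister() throws Exception;   // hypothetical stand-in for reportForDuty
      }

      static void registerWithRetry(Registration r, long sleepMs) throws InterruptedException {
        while (true) {
          try {
            if (r.tryRegister()) {
              return;                    // master accepted the registration
            }
          } catch (Exception e) {
            // e.g. ServerNotRunningYetException while the master is still starting
          }
          Thread.sleep(sleepMs);         // the log shows a 100 ms pause between attempts
        }
      }
    }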
2024-11-13T10:25:43,082 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:43,083 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T10:25:43,089 INFO [master/770665a7984d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731493573089 2024-11-13T10:25:43,091 INFO [master/770665a7984d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T10:25:43,092 INFO [master/770665a7984d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T10:25:43,096 INFO [master/770665a7984d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T10:25:43,096 INFO [master/770665a7984d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T10:25:43,097 INFO [master/770665a7984d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T10:25:43,097 INFO [master/770665a7984d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T10:25:43,100 INFO [master/770665a7984d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
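Note: the FSTableDescriptors line above spells out the hbase:meta schema: four column families (info, ns, rep_barrier, table) with ROWCOL bloom filters, ROW_INDEX_V1 data-block encoding, in-memory caching and small block sizes. A user table with similar family attributes can be declared through the public client API; the table name "demo" below is made up for illustration:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"));
          // One family tuned roughly like the meta 'info' family shown above.
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(3)
              .setBloomFilterType(BloomType.ROWCOL)
              .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
              .setInMemory(true)
              .setBlocksize(8192)        // 8 KB, as in the descriptor above
              .build());
          admin.createTable(table.build());
        }
      }
    }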
2024-11-13T10:25:43,109 INFO [master/770665a7984d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T10:25:43,110 INFO [master/770665a7984d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T10:25:43,111 INFO [master/770665a7984d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T10:25:43,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741831_1007 (size=1321) 2024-11-13T10:25:43,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741831_1007 (size=1321) 2024-11-13T10:25:43,140 INFO [master/770665a7984d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T10:25:43,141 INFO [master/770665a7984d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T10:25:43,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741831_1007 (size=1321) 2024-11-13T10:25:43,144 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T10:25:43,145 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510 2024-11-13T10:25:43,148 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting 
for large file=Thread[master/770665a7984d:0:becomeActiveMaster-HFileCleaner.large.0-1731493543143,5,FailOnTimeoutGroup] 2024-11-13T10:25:43,151 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/770665a7984d:0:becomeActiveMaster-HFileCleaner.small.0-1731493543149,5,FailOnTimeoutGroup] 2024-11-13T10:25:43,152 INFO [master/770665a7984d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,152 INFO [master/770665a7984d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T10:25:43,153 WARN [IPC Server handler 0 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T10:25:43,153 WARN [IPC Server handler 0 on default port 41249 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T10:25:43,153 WARN [IPC Server handler 0 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T10:25:43,153 INFO [master/770665a7984d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,154 INFO [master/770665a7984d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
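Note: the HMaster(1741) line above reports that reopening regions with a very high store-file reference count is disabled because hbase.regions.recovery.store.file.ref.count is not set to a positive value; the surrounding BlockPlacementPolicy warnings are commonly harmless noise on a single-host test DFS. A minimal sketch of enabling the ref-count feature (the threshold 256 is an arbitrary illustrative value, not a recommendation):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Any value > 0 enables the reopen-on-high-refCount behaviour the
        // master log above says is currently disabled.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
      }
    }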
2024-11-13T10:25:43,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741832_1008 (size=32) 2024-11-13T10:25:43,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741832_1008 (size=32) 2024-11-13T10:25:43,174 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:25:43,177 INFO [RS:1;770665a7984d:36821 {}] regionserver.HRegionServer(2659): reportForDuty to master=770665a7984d,45401,1731493540547 with port=36821, startcode=1731493541562 2024-11-13T10:25:43,178 INFO [RS:2;770665a7984d:46143 {}] regionserver.HRegionServer(2659): reportForDuty to master=770665a7984d,45401,1731493540547 with port=46143, startcode=1731493541627 2024-11-13T10:25:43,179 INFO [RS:0;770665a7984d:44657 {}] regionserver.HRegionServer(2659): reportForDuty to master=770665a7984d,45401,1731493540547 with port=44657, startcode=1731493541444 2024-11-13T10:25:43,180 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45401 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 770665a7984d,36821,1731493541562 2024-11-13T10:25:43,183 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45401 {}] master.ServerManager(517): Registering regionserver=770665a7984d,36821,1731493541562 2024-11-13T10:25:43,184 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T10:25:43,188 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T10:25:43,188 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:43,192 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T10:25:43,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T10:25:43,194 DEBUG [RS:1;770665a7984d:36821 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510 2024-11-13T10:25:43,195 DEBUG [RS:1;770665a7984d:36821 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41249 2024-11-13T10:25:43,195 DEBUG [RS:1;770665a7984d:36821 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T10:25:43,197 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45401 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 770665a7984d,44657,1731493541444 2024-11-13T10:25:43,198 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45401 {}] master.ServerManager(517): Registering regionserver=770665a7984d,44657,1731493541444 2024-11-13T10:25:43,198 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T10:25:43,198 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:43,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T10:25:43,203 DEBUG [RS:1;770665a7984d:36821 {}] zookeeper.ZKUtil(111): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/770665a7984d,36821,1731493541562 2024-11-13T10:25:43,203 WARN [RS:1;770665a7984d:36821 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
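Note: the three "Config from master" lines are the settings each region server adopts from the master at registration time; hbase.master.info.port=-1 disables the master's info web UI, which is typical for tests. The same keys, with the values reported in this run, expressed as client-side configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ClusterConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values copied from the "Config from master" lines above; a real
        // deployment would point these at its own HDFS namenode.
        conf.set("fs.defaultFS", "hdfs://localhost:41249");
        conf.set("hbase.rootdir",
            "hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510");
        conf.setInt("hbase.master.info.port", -1);   // -1 disables the master info UI
      }
    }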
2024-11-13T10:25:43,203 INFO [RS:1;770665a7984d:36821 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:25:43,203 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45401 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 770665a7984d,46143,1731493541627 2024-11-13T10:25:43,203 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45401 {}] master.ServerManager(517): Registering regionserver=770665a7984d,46143,1731493541627 2024-11-13T10:25:43,203 DEBUG [RS:1;770665a7984d:36821 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,36821,1731493541562 2024-11-13T10:25:43,203 DEBUG [RS:0;770665a7984d:44657 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510 2024-11-13T10:25:43,204 DEBUG [RS:0;770665a7984d:44657 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41249 2024-11-13T10:25:43,204 DEBUG [RS:0;770665a7984d:44657 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T10:25:43,205 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T10:25:43,205 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T10:25:43,207 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [770665a7984d,36821,1731493541562] 2024-11-13T10:25:43,208 DEBUG [RS:2;770665a7984d:46143 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510 2024-11-13T10:25:43,208 DEBUG [RS:2;770665a7984d:46143 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41249 2024-11-13T10:25:43,208 DEBUG [RS:2;770665a7984d:46143 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T10:25:43,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T10:25:43,216 DEBUG [RS:0;770665a7984d:44657 {}] zookeeper.ZKUtil(111): regionserver:44657-0x10110dc99880001, quorum=127.0.0.1:51925, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/770665a7984d,44657,1731493541444 2024-11-13T10:25:43,216 WARN [RS:0;770665a7984d:44657 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-13T10:25:43,216 INFO [RS:0;770665a7984d:44657 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:25:43,216 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [770665a7984d,44657,1731493541444] 2024-11-13T10:25:43,217 DEBUG [RS:0;770665a7984d:44657 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444 2024-11-13T10:25:43,217 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [770665a7984d,46143,1731493541627] 2024-11-13T10:25:43,218 DEBUG [RS:2;770665a7984d:46143 {}] zookeeper.ZKUtil(111): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/770665a7984d,46143,1731493541627 2024-11-13T10:25:43,218 WARN [RS:2;770665a7984d:46143 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T10:25:43,219 INFO [RS:2;770665a7984d:46143 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:25:43,219 DEBUG [RS:2;770665a7984d:46143 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,46143,1731493541627 2024-11-13T10:25:43,220 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T10:25:43,220 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:43,223 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T10:25:43,223 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T10:25:43,227 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming 
window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T10:25:43,228 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:43,229 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T10:25:43,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T10:25:43,231 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740 2024-11-13T10:25:43,232 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740 2024-11-13T10:25:43,238 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T10:25:43,238 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T10:25:43,239 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
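Note: the 32.0 M fallback reported by FlushLargeStoresPolicy above is simply the region memstore flush size divided by the number of column families: hbase:meta has four families (info, ns, rep_barrier, table), and with the default 128 MB hbase.hregion.memstore.flush.size that gives 128 MB / 4 = 32 MB per family. The log notes that the explicit key, hbase.hregion.percolumnfamilyflush.size.lower.bound, was not set in the table descriptor, which is why the derived value is used.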
2024-11-13T10:25:43,243 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T10:25:43,260 INFO [RS:1;770665a7984d:36821 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T10:25:43,260 INFO [RS:0;770665a7984d:44657 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T10:25:43,260 INFO [RS:2;770665a7984d:46143 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T10:25:43,261 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T10:25:43,263 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72190569, jitterRate=0.0757233053445816}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T10:25:43,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731493543175Initializing all the Stores at 1731493543181 (+6 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731493543181Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731493543183 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493543183Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731493543183Cleaning up temporary data from old regions at 1731493543238 (+55 ms)Region opened successfully at 1731493543267 (+29 ms) 2024-11-13T10:25:43,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T10:25:43,268 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T10:25:43,268 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T10:25:43,268 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 
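Note: the split-policy numbers in the "Opened 1588230740" line above are internally consistent: initialSize=268435456 is 256 MB (presumably 2 x the 128 MB memstore flush size, as used by IncreasingToUpperBoundRegionSplitPolicy), and desiredMaxFileSize=72190569 works out to 67,108,864 bytes (64 MB) x (1 + jitterRate 0.0757233...) ≈ 72,190,569 bytes, i.e. a 64 MB base file-size limit with the reported random jitter applied.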
2024-11-13T10:25:43,268 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T10:25:43,270 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T10:25:43,270 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731493543267Disabling compacts and flushes for region at 1731493543267Disabling writes for close at 1731493543268 (+1 ms)Writing region close event to WAL at 1731493543270 (+2 ms)Closed at 1731493543270 2024-11-13T10:25:43,277 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T10:25:43,277 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T10:25:43,279 INFO [RS:0;770665a7984d:44657 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T10:25:43,280 INFO [RS:1;770665a7984d:36821 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T10:25:43,283 INFO [RS:2;770665a7984d:46143 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T10:25:43,287 INFO [RS:2;770665a7984d:46143 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T10:25:43,287 INFO [RS:1;770665a7984d:36821 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T10:25:43,287 INFO [RS:0;770665a7984d:44657 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T10:25:43,287 INFO [RS:2;770665a7984d:46143 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,287 INFO [RS:0;770665a7984d:44657 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,287 INFO [RS:1;770665a7984d:36821 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
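Note: the MemStoreFlusher figures above match the stock defaults: the low-water mark is 836 M / 880 M = 0.95 of the global limit (hbase.regionserver.global.memstore.size.lower.limit defaults to 0.95), and an 880 M global limit corresponds to the default 40% of a roughly 2.2 GB region-server heap (880 M / 0.4 = 2200 M).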
2024-11-13T10:25:43,287 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T10:25:43,288 INFO [RS:2;770665a7984d:46143 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T10:25:43,288 INFO [RS:1;770665a7984d:36821 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T10:25:43,288 INFO [RS:0;770665a7984d:44657 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T10:25:43,295 INFO [RS:2;770665a7984d:46143 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T10:25:43,295 INFO [RS:1;770665a7984d:36821 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T10:25:43,295 INFO [RS:0;770665a7984d:44657 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T10:25:43,296 INFO [RS:1;770665a7984d:36821 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,297 DEBUG [RS:1;770665a7984d:36821 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,297 DEBUG [RS:1;770665a7984d:36821 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,297 DEBUG [RS:1;770665a7984d:36821 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,297 DEBUG [RS:1;770665a7984d:36821 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,297 DEBUG [RS:1;770665a7984d:36821 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,297 DEBUG [RS:1;770665a7984d:36821 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0, corePoolSize=2, maxPoolSize=2 2024-11-13T10:25:43,297 DEBUG [RS:1;770665a7984d:36821 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,297 DEBUG [RS:1;770665a7984d:36821 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,298 DEBUG [RS:1;770665a7984d:36821 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,298 DEBUG [RS:1;770665a7984d:36821 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,298 DEBUG [RS:1;770665a7984d:36821 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,298 DEBUG [RS:1;770665a7984d:36821 {}] 
executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,298 DEBUG [RS:1;770665a7984d:36821 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/770665a7984d:0, corePoolSize=3, maxPoolSize=3 2024-11-13T10:25:43,298 DEBUG [RS:1;770665a7984d:36821 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/770665a7984d:0, corePoolSize=3, maxPoolSize=3 2024-11-13T10:25:43,298 INFO [RS:0;770665a7984d:44657 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,298 INFO [RS:2;770665a7984d:46143 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,299 DEBUG [RS:0;770665a7984d:44657 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,299 DEBUG [RS:2;770665a7984d:46143 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,299 DEBUG [RS:0;770665a7984d:44657 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,299 DEBUG [RS:2;770665a7984d:46143 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,299 DEBUG [RS:0;770665a7984d:44657 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,299 DEBUG [RS:2;770665a7984d:46143 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,299 DEBUG [RS:0;770665a7984d:44657 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,299 DEBUG [RS:2;770665a7984d:46143 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,300 DEBUG [RS:0;770665a7984d:44657 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,300 DEBUG [RS:2;770665a7984d:46143 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,300 DEBUG [RS:0;770665a7984d:44657 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0, corePoolSize=2, maxPoolSize=2 2024-11-13T10:25:43,300 DEBUG [RS:2;770665a7984d:46143 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0, corePoolSize=2, maxPoolSize=2 2024-11-13T10:25:43,300 DEBUG [RS:0;770665a7984d:44657 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,300 DEBUG [RS:2;770665a7984d:46143 {}] executor.ExecutorService(95): 
Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,300 DEBUG [RS:0;770665a7984d:44657 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,300 DEBUG [RS:2;770665a7984d:46143 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,300 DEBUG [RS:0;770665a7984d:44657 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,300 DEBUG [RS:2;770665a7984d:46143 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,300 DEBUG [RS:0;770665a7984d:44657 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,301 DEBUG [RS:2;770665a7984d:46143 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,301 DEBUG [RS:0;770665a7984d:44657 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,301 DEBUG [RS:2;770665a7984d:46143 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,301 DEBUG [RS:0;770665a7984d:44657 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,301 DEBUG [RS:2;770665a7984d:46143 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/770665a7984d:0, corePoolSize=1, maxPoolSize=1 2024-11-13T10:25:43,301 DEBUG [RS:0;770665a7984d:44657 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/770665a7984d:0, corePoolSize=3, maxPoolSize=3 2024-11-13T10:25:43,301 DEBUG [RS:2;770665a7984d:46143 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/770665a7984d:0, corePoolSize=3, maxPoolSize=3 2024-11-13T10:25:43,301 DEBUG [RS:0;770665a7984d:44657 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/770665a7984d:0, corePoolSize=3, maxPoolSize=3 2024-11-13T10:25:43,301 DEBUG [RS:2;770665a7984d:46143 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/770665a7984d:0, corePoolSize=3, maxPoolSize=3 2024-11-13T10:25:43,303 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T10:25:43,304 INFO [RS:0;770665a7984d:44657 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-13T10:25:43,304 INFO [RS:0;770665a7984d:44657 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,305 INFO [RS:0;770665a7984d:44657 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,305 INFO [RS:0;770665a7984d:44657 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,305 INFO [RS:0;770665a7984d:44657 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,305 INFO [RS:0;770665a7984d:44657 {}] hbase.ChoreService(168): Chore ScheduledChore name=770665a7984d,44657,1731493541444-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T10:25:43,306 INFO [RS:1;770665a7984d:36821 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,306 INFO [RS:1;770665a7984d:36821 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,306 INFO [RS:1;770665a7984d:36821 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,306 INFO [RS:1;770665a7984d:36821 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,307 INFO [RS:1;770665a7984d:36821 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,307 INFO [RS:1;770665a7984d:36821 {}] hbase.ChoreService(168): Chore ScheduledChore name=770665a7984d,36821,1731493541562-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T10:25:43,307 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T10:25:43,310 INFO [RS:2;770665a7984d:46143 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,311 INFO [RS:2;770665a7984d:46143 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,311 INFO [RS:2;770665a7984d:46143 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,311 INFO [RS:2;770665a7984d:46143 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,311 INFO [RS:2;770665a7984d:46143 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,311 INFO [RS:2;770665a7984d:46143 {}] hbase.ChoreService(168): Chore ScheduledChore name=770665a7984d,46143,1731493541627-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
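Note: every "Chore ScheduledChore name=..., period=..., unit=... is enabled." line above comes from the ChoreService/ScheduledChore framework the region servers use for periodic work (CompactionChecker, MemstoreFlusherChore, nonceCleaner, and so on). A minimal sketch of scheduling a custom chore, assuming these HBase-internal classes are on the classpath (they are not a stable public API):

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws Exception {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // A trivial chore that fires every 1000 ms, like the CompactionChecker above.
        ScheduledChore demoChore = new ScheduledChore("demoChore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(demoChore);   // ChoreService logs the "... is enabled." line
        Thread.sleep(5000);
        stopper.stop("done");
        service.shutdown();
      }
    }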
2024-11-13T10:25:43,337 INFO [RS:0;770665a7984d:44657 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T10:25:43,337 INFO [RS:1;770665a7984d:36821 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T10:25:43,340 INFO [RS:0;770665a7984d:44657 {}] hbase.ChoreService(168): Chore ScheduledChore name=770665a7984d,44657,1731493541444-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,340 INFO [RS:0;770665a7984d:44657 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,341 INFO [RS:0;770665a7984d:44657 {}] regionserver.Replication(171): 770665a7984d,44657,1731493541444 started 2024-11-13T10:25:43,341 INFO [RS:1;770665a7984d:36821 {}] hbase.ChoreService(168): Chore ScheduledChore name=770665a7984d,36821,1731493541562-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,341 INFO [RS:1;770665a7984d:36821 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,342 INFO [RS:1;770665a7984d:36821 {}] regionserver.Replication(171): 770665a7984d,36821,1731493541562 started 2024-11-13T10:25:43,342 INFO [RS:2;770665a7984d:46143 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T10:25:43,342 INFO [RS:2;770665a7984d:46143 {}] hbase.ChoreService(168): Chore ScheduledChore name=770665a7984d,46143,1731493541627-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,342 INFO [RS:2;770665a7984d:46143 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,342 INFO [RS:2;770665a7984d:46143 {}] regionserver.Replication(171): 770665a7984d,46143,1731493541627 started 2024-11-13T10:25:43,371 INFO [RS:0;770665a7984d:44657 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,371 INFO [RS:2;770665a7984d:46143 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:43,372 INFO [RS:0;770665a7984d:44657 {}] regionserver.HRegionServer(1482): Serving as 770665a7984d,44657,1731493541444, RpcServer on 770665a7984d/172.17.0.2:44657, sessionid=0x10110dc99880001 2024-11-13T10:25:43,371 INFO [RS:1;770665a7984d:36821 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T10:25:43,372 INFO [RS:1;770665a7984d:36821 {}] regionserver.HRegionServer(1482): Serving as 770665a7984d,36821,1731493541562, RpcServer on 770665a7984d/172.17.0.2:36821, sessionid=0x10110dc99880002 2024-11-13T10:25:43,373 DEBUG [RS:0;770665a7984d:44657 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T10:25:43,373 DEBUG [RS:0;770665a7984d:44657 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 770665a7984d,44657,1731493541444 2024-11-13T10:25:43,373 DEBUG [RS:1;770665a7984d:36821 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T10:25:43,373 DEBUG [RS:0;770665a7984d:44657 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '770665a7984d,44657,1731493541444' 2024-11-13T10:25:43,374 DEBUG [RS:1;770665a7984d:36821 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 770665a7984d,36821,1731493541562 2024-11-13T10:25:43,374 DEBUG [RS:1;770665a7984d:36821 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '770665a7984d,36821,1731493541562' 2024-11-13T10:25:43,374 DEBUG [RS:1;770665a7984d:36821 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T10:25:43,374 DEBUG [RS:0;770665a7984d:44657 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T10:25:43,375 DEBUG [RS:0;770665a7984d:44657 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T10:25:43,375 DEBUG [RS:1;770665a7984d:36821 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T10:25:43,376 DEBUG [RS:0;770665a7984d:44657 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T10:25:43,376 DEBUG [RS:0;770665a7984d:44657 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T10:25:43,376 INFO [RS:2;770665a7984d:46143 {}] regionserver.HRegionServer(1482): Serving as 770665a7984d,46143,1731493541627, RpcServer on 770665a7984d/172.17.0.2:46143, sessionid=0x10110dc99880003 2024-11-13T10:25:43,376 DEBUG [RS:0;770665a7984d:44657 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 770665a7984d,44657,1731493541444 2024-11-13T10:25:43,376 DEBUG [RS:0;770665a7984d:44657 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '770665a7984d,44657,1731493541444' 2024-11-13T10:25:43,376 DEBUG [RS:0;770665a7984d:44657 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T10:25:43,376 DEBUG [RS:2;770665a7984d:46143 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T10:25:43,377 DEBUG [RS:2;770665a7984d:46143 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 770665a7984d,46143,1731493541627 2024-11-13T10:25:43,377 DEBUG [RS:2;770665a7984d:46143 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '770665a7984d,46143,1731493541627' 2024-11-13T10:25:43,377 DEBUG [RS:2;770665a7984d:46143 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T10:25:43,379 DEBUG 
[RS:1;770665a7984d:36821 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T10:25:43,379 DEBUG [RS:1;770665a7984d:36821 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T10:25:43,380 DEBUG [RS:1;770665a7984d:36821 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 770665a7984d,36821,1731493541562 2024-11-13T10:25:43,380 DEBUG [RS:1;770665a7984d:36821 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '770665a7984d,36821,1731493541562' 2024-11-13T10:25:43,380 DEBUG [RS:1;770665a7984d:36821 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T10:25:43,380 DEBUG [RS:2;770665a7984d:46143 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T10:25:43,384 DEBUG [RS:0;770665a7984d:44657 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T10:25:43,384 DEBUG [RS:1;770665a7984d:36821 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T10:25:43,384 DEBUG [RS:2;770665a7984d:46143 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T10:25:43,384 DEBUG [RS:2;770665a7984d:46143 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T10:25:43,384 DEBUG [RS:2;770665a7984d:46143 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 770665a7984d,46143,1731493541627 2024-11-13T10:25:43,384 DEBUG [RS:2;770665a7984d:46143 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '770665a7984d,46143,1731493541627' 2024-11-13T10:25:43,384 DEBUG [RS:2;770665a7984d:46143 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T10:25:43,385 DEBUG [RS:0;770665a7984d:44657 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T10:25:43,385 INFO [RS:0;770665a7984d:44657 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T10:25:43,385 INFO [RS:0;770665a7984d:44657 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T10:25:43,385 DEBUG [RS:1;770665a7984d:36821 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T10:25:43,385 INFO [RS:1;770665a7984d:36821 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T10:25:43,385 INFO [RS:1;770665a7984d:36821 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T10:25:43,385 DEBUG [RS:2;770665a7984d:46143 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T10:25:43,386 DEBUG [RS:2;770665a7984d:46143 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T10:25:43,386 INFO [RS:2;770665a7984d:46143 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T10:25:43,386 INFO [RS:2;770665a7984d:46143 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
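The flush-table-proc and online-snapshot members above poll fixed znodes such as '/hbase/flush-table-proc/abort' and '/hbase/flush-table-proc/acquired'. The sketch below lists those znodes with the plain ZooKeeper client for troubleshooting; the connect string 127.0.0.1:51925 is the test quorum shown in the ZKWatcher entries later in this log and would differ on a real deployment.

```java
import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ProcedureZNodeDump {
  public static void main(String[] args) throws Exception {
    // Quorum taken from the ZKWatcher entries in this log; adjust for a real cluster.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51925", 30_000, event -> { });
    try {
      for (String proc : new String[] { "flush-table-proc", "online-snapshot" }) {
        // Same znodes the ZKProcedureMemberRpcs entries refer to.
        for (String child : new String[] { "acquired", "abort" }) {
          String path = "/hbase/" + proc + "/" + child;
          if (zk.exists(path, false) != null) {
            List<String> names = zk.getChildren(path, false);
            System.out.println(path + " -> " + names);
          }
        }
      }
    } finally {
      zk.close();
    }
  }
}
```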
2024-11-13T10:25:43,458 WARN [770665a7984d:45401 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-13T10:25:43,491 INFO [RS:2;770665a7984d:46143 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T10:25:43,491 INFO [RS:0;770665a7984d:44657 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T10:25:43,491 INFO [RS:1;770665a7984d:36821 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T10:25:43,495 INFO [RS:2;770665a7984d:46143 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=770665a7984d%2C46143%2C1731493541627, suffix=, logDir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,46143,1731493541627, archiveDir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/oldWALs, maxLogs=32 2024-11-13T10:25:43,495 INFO [RS:0;770665a7984d:44657 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=770665a7984d%2C44657%2C1731493541444, suffix=, logDir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444, archiveDir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/oldWALs, maxLogs=32 2024-11-13T10:25:43,495 INFO [RS:1;770665a7984d:36821 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=770665a7984d%2C36821%2C1731493541562, suffix=, logDir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,36821,1731493541562, archiveDir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/oldWALs, maxLogs=32 2024-11-13T10:25:43,519 DEBUG [RS:0;770665a7984d:44657 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444/770665a7984d%2C44657%2C1731493541444.1731493543503, exclude list is [], retry=0 2024-11-13T10:25:43,520 DEBUG [RS:2;770665a7984d:46143 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,46143,1731493541627/770665a7984d%2C46143%2C1731493541627.1731493543503, exclude list is [], retry=0 2024-11-13T10:25:43,521 DEBUG [RS:1;770665a7984d:36821 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,36821,1731493541562/770665a7984d%2C36821%2C1731493541562.1731493543503, exclude list is [], retry=0 2024-11-13T10:25:43,522 WARN [IPC Server handler 2 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T10:25:43,522 WARN [IPC Server handler 2 on default port 41249 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], 
unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T10:25:43,523 WARN [IPC Server handler 2 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T10:25:43,523 WARN [IPC Server handler 1 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T10:25:43,524 WARN [IPC Server handler 1 on default port 41249 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T10:25:43,524 WARN [IPC Server handler 1 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T10:25:43,527 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:25:43,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:25:43,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:25:43,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:25:43,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client 
skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:25:43,559 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:25:43,559 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:25:43,565 INFO [RS:0;770665a7984d:44657 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444/770665a7984d%2C44657%2C1731493541444.1731493543503 2024-11-13T10:25:43,570 INFO [RS:1;770665a7984d:36821 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,36821,1731493541562/770665a7984d%2C36821%2C1731493541562.1731493543503 2024-11-13T10:25:43,571 DEBUG [RS:0;770665a7984d:44657 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:45409:45409)] 2024-11-13T10:25:43,571 INFO [RS:2;770665a7984d:46143 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,46143,1731493541627/770665a7984d%2C46143%2C1731493541627.1731493543503 2024-11-13T10:25:43,571 DEBUG [RS:1;770665a7984d:36821 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:45409:45409)] 2024-11-13T10:25:43,571 DEBUG [RS:2;770665a7984d:46143 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:45409:45409)] 2024-11-13T10:25:43,711 DEBUG [770665a7984d:45401 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-13T10:25:43,720 DEBUG [770665a7984d:45401 {}] balancer.BalancerClusterState(204): Hosts are {770665a7984d=0} racks are {/default-rack=0} 2024-11-13T10:25:43,727 DEBUG [770665a7984d:45401 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T10:25:43,727 DEBUG [770665a7984d:45401 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T10:25:43,727 DEBUG [770665a7984d:45401 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T10:25:43,728 DEBUG [770665a7984d:45401 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T10:25:43,728 DEBUG [770665a7984d:45401 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T10:25:43,728 DEBUG [770665a7984d:45401 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T10:25:43,728 INFO [770665a7984d:45401 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T10:25:43,728 INFO [770665a7984d:45401 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T10:25:43,728 INFO [770665a7984d:45401 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T10:25:43,728 DEBUG [770665a7984d:45401 {}] 
balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T10:25:43,739 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=770665a7984d,44657,1731493541444 2024-11-13T10:25:43,745 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 770665a7984d,44657,1731493541444, state=OPENING 2024-11-13T10:25:43,751 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T10:25:43,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44657-0x10110dc99880001, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:43,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:43,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:43,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:25:43,754 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T10:25:43,754 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T10:25:43,755 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T10:25:43,755 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T10:25:43,757 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T10:25:43,760 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=770665a7984d,44657,1731493541444}] 2024-11-13T10:25:43,939 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T10:25:43,941 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53439, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T10:25:43,953 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T10:25:43,954 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class 
org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:25:43,955 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-13T10:25:43,958 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=770665a7984d%2C44657%2C1731493541444.meta, suffix=.meta, logDir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444, archiveDir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/oldWALs, maxLogs=32 2024-11-13T10:25:43,975 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444/770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta, exclude list is [], retry=0 2024-11-13T10:25:43,977 WARN [IPC Server handler 4 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T10:25:43,977 WARN [IPC Server handler 4 on default port 41249 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T10:25:43,977 WARN [IPC Server handler 4 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T10:25:43,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:25:43,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:25:43,982 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444/770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta 2024-11-13T10:25:43,982 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 
{event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:35745:35745)] 2024-11-13T10:25:43,983 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:25:43,985 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T10:25:43,989 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T10:25:43,993 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-13T10:25:43,998 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T10:25:43,998 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:25:43,998 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T10:25:43,998 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T10:25:44,003 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T10:25:44,005 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T10:25:44,005 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:44,006 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T10:25:44,007 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T10:25:44,008 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T10:25:44,009 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:44,010 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T10:25:44,010 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T10:25:44,011 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T10:25:44,012 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:44,013 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T10:25:44,013 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T10:25:44,015 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T10:25:44,015 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:44,016 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T10:25:44,016 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T10:25:44,018 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740 2024-11-13T10:25:44,021 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740 2024-11-13T10:25:44,023 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T10:25:44,023 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T10:25:44,024 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
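The StoreOpener entries above initialize one store per hbase:meta column family (info, ns, rep_barrier, table). The following is a hedged sketch of listing those families from a client through the Admin API; it assumes cluster connection settings are picked up from an hbase-site.xml on the classpath.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class MetaFamilies {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
      // Prints the families the StoreOpener entries initialize: info, ns, rep_barrier, table.
      for (ColumnFamilyDescriptor cf : meta.getColumnFamilies()) {
        System.out.println(cf.getNameAsString());
      }
    }
  }
}
```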
2024-11-13T10:25:44,028 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T10:25:44,029 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60334301, jitterRate=-0.10094885528087616}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T10:25:44,030 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T10:25:44,031 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731493543999Writing region info on filesystem at 1731493543999Initializing all the Stores at 1731493544002 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731493544002Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731493544003 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493544003Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731493544003Cleaning up temporary data from old regions at 1731493544023 (+20 ms)Running coprocessor post-open hooks at 1731493544030 (+7 ms)Region opened successfully at 1731493544031 (+1 ms) 2024-11-13T10:25:44,039 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731493543929 2024-11-13T10:25:44,051 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T10:25:44,052 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T10:25:44,054 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=770665a7984d,44657,1731493541444 2024-11-13T10:25:44,057 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 770665a7984d,44657,1731493541444, state=OPEN 2024-11-13T10:25:44,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44657-0x10110dc99880001, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T10:25:44,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T10:25:44,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T10:25:44,061 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T10:25:44,061 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T10:25:44,061 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T10:25:44,062 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=770665a7984d,44657,1731493541444 2024-11-13T10:25:44,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T10:25:44,062 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T10:25:44,071 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T10:25:44,071 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=770665a7984d,44657,1731493541444 in 303 msec 2024-11-13T10:25:44,086 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T10:25:44,086 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 786 msec 2024-11-13T10:25:44,090 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T10:25:44,090 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T10:25:44,109 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], 
replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T10:25:44,110 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T10:25:44,110 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T10:25:44,110 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T10:25:44,110 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T10:25:44,110 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T10:25:44,111 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T10:25:44,111 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T10:25:44,118 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching 
meta region location from registry 2024-11-13T10:25:44,120 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1] 2024-11-13T10:25:44,156 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T10:25:44,162 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60513, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T10:25:44,198 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.3310 sec 2024-11-13T10:25:44,198 INFO [master/770665a7984d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731493544198, completionTime=-1 2024-11-13T10:25:44,201 INFO [master/770665a7984d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-13T10:25:44,201 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T10:25:44,237 INFO [master/770665a7984d:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-13T10:25:44,237 INFO [master/770665a7984d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731493604237 2024-11-13T10:25:44,237 INFO [master/770665a7984d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731493664237 2024-11-13T10:25:44,237 INFO [master/770665a7984d:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 36 msec 2024-11-13T10:25:44,240 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-13T10:25:44,276 INFO [master/770665a7984d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=770665a7984d,45401,1731493540547-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:44,276 INFO [master/770665a7984d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=770665a7984d,45401,1731493540547-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:44,276 INFO [master/770665a7984d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=770665a7984d,45401,1731493540547-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:44,278 INFO [master/770665a7984d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-770665a7984d:45401, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:44,278 INFO [master/770665a7984d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 
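The master entries above report three registered region servers and enable the balancer, normalizer, and catalog-janitor chores. The sketch below reads the same server counts from a client through ClusterMetrics; connection configuration is again assumed to come from the environment rather than from this log.

```java
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterSummary {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      // Mirrors "Number of RegionServers=3" and the active master reported in this log.
      System.out.println("active master: " + metrics.getMasterName());
      System.out.println("live region servers: " + metrics.getLiveServerMetrics().size());
      System.out.println("dead region servers: " + metrics.getDeadServerNames().size());
    }
  }
}
```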
2024-11-13T10:25:44,279 INFO [master/770665a7984d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T10:25:44,287 DEBUG [master/770665a7984d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T10:25:44,318 INFO [master/770665a7984d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.561sec 2024-11-13T10:25:44,327 INFO [master/770665a7984d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T10:25:44,328 INFO [master/770665a7984d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T10:25:44,329 INFO [master/770665a7984d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T10:25:44,330 INFO [master/770665a7984d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T10:25:44,330 INFO [master/770665a7984d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T10:25:44,331 INFO [master/770665a7984d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=770665a7984d,45401,1731493540547-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T10:25:44,331 INFO [master/770665a7984d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=770665a7984d,45401,1731493540547-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T10:25:44,335 DEBUG [master/770665a7984d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T10:25:44,336 INFO [master/770665a7984d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T10:25:44,337 INFO [master/770665a7984d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=770665a7984d,45401,1731493540547-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
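MasterQuotaManager here, like the region servers earlier, logs "Quota support disabled". That behavior is governed by the hbase.quota.enabled property, which defaults to false; the snippet below only shows the programmatic form of the setting, which would normally be placed in hbase-site.xml rather than set in code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class EnableQuotas {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "hbase.quota.enabled" defaults to false, which is why the master and the
    // region servers in this log report "Quota support disabled".
    conf.setBoolean("hbase.quota.enabled", true);
    System.out.println("hbase.quota.enabled = " + conf.getBoolean("hbase.quota.enabled", false));
  }
}
```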
2024-11-13T10:25:44,392 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a940a79, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T10:25:44,393 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 770665a7984d,45401,-1 for getting cluster id 2024-11-13T10:25:44,396 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T10:25:44,404 DEBUG [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b9fd5f8e-ec8b-4cb8-95d4-350ee72f0575' 2024-11-13T10:25:44,406 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T10:25:44,407 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b9fd5f8e-ec8b-4cb8-95d4-350ee72f0575" 2024-11-13T10:25:44,407 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4325a86b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T10:25:44,407 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [770665a7984d,45401,-1] 2024-11-13T10:25:44,409 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T10:25:44,411 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T10:25:44,413 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53526, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T10:25:44,415 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@604ede98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T10:25:44,416 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T10:25:44,423 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1] 2024-11-13T10:25:44,423 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T10:25:44,426 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49982, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T10:25:44,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=770665a7984d,45401,1731493540547 2024-11-13T10:25:44,448 INFO [Time-limited test {}] wal.AbstractTestWALReplay(147): hbase.rootdir=hdfs://localhost:41249/hbase 2024-11-13T10:25:44,463 INFO [Time-limited test 
{}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit0 Thread=353, OpenFileDescriptor=583, MaxFileDescriptor=1048576, SystemLoadAverage=667, ProcessCount=11, AvailableMemoryMB=967 2024-11-13T10:25:44,488 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:25:44,492 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:25:44,493 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T10:25:44,498 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-78899845, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/hregion-78899845, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:25:44,521 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-78899845/hregion-78899845.1731493544501, exclude list is [], retry=0 2024-11-13T10:25:44,526 DEBUG [AsyncFSWAL-8-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:25:44,527 DEBUG [AsyncFSWAL-8-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:25:44,528 DEBUG [AsyncFSWAL-8-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:25:44,537 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-78899845/hregion-78899845.1731493544501 2024-11-13T10:25:44,538 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:45409:45409)] 2024-11-13T10:25:44,538 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 36cf00619e7db33ad948691bbad1cbc1, NAME => 'testReplayEditsWrittenIntoWAL,,1731493544489.36cf00619e7db33ad948691bbad1cbc1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41249/hbase 2024-11-13T10:25:44,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741838_1014 (size=64) 2024-11-13T10:25:44,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741838_1014 (size=64) 2024-11-13T10:25:44,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is 
added to blk_1073741838_1014 (size=64) 2024-11-13T10:25:44,571 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1731493544489.36cf00619e7db33ad948691bbad1cbc1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:25:44,576 INFO [StoreOpener-36cf00619e7db33ad948691bbad1cbc1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 36cf00619e7db33ad948691bbad1cbc1 2024-11-13T10:25:44,580 INFO [StoreOpener-36cf00619e7db33ad948691bbad1cbc1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 36cf00619e7db33ad948691bbad1cbc1 columnFamilyName a 2024-11-13T10:25:44,580 DEBUG [StoreOpener-36cf00619e7db33ad948691bbad1cbc1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:44,581 INFO [StoreOpener-36cf00619e7db33ad948691bbad1cbc1-1 {}] regionserver.HStore(327): Store=36cf00619e7db33ad948691bbad1cbc1/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:44,582 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 36cf00619e7db33ad948691bbad1cbc1 2024-11-13T10:25:44,583 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1 2024-11-13T10:25:44,584 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1 2024-11-13T10:25:44,585 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 36cf00619e7db33ad948691bbad1cbc1 2024-11-13T10:25:44,585 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 36cf00619e7db33ad948691bbad1cbc1 2024-11-13T10:25:44,588 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 36cf00619e7db33ad948691bbad1cbc1 2024-11-13T10:25:44,602 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T10:25:44,603 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 36cf00619e7db33ad948691bbad1cbc1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65748305, 
jitterRate=-0.020273908972740173}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T10:25:44,605 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 36cf00619e7db33ad948691bbad1cbc1: Writing region info on filesystem at 1731493544572Initializing all the Stores at 1731493544576 (+4 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493544576Cleaning up temporary data from old regions at 1731493544585 (+9 ms)Region opened successfully at 1731493544605 (+20 ms) 2024-11-13T10:25:44,605 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 36cf00619e7db33ad948691bbad1cbc1, disabling compactions & flushes 2024-11-13T10:25:44,605 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1731493544489.36cf00619e7db33ad948691bbad1cbc1. 2024-11-13T10:25:44,605 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1731493544489.36cf00619e7db33ad948691bbad1cbc1. 2024-11-13T10:25:44,605 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1731493544489.36cf00619e7db33ad948691bbad1cbc1. after waiting 0 ms 2024-11-13T10:25:44,605 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1731493544489.36cf00619e7db33ad948691bbad1cbc1. 2024-11-13T10:25:44,607 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1731493544489.36cf00619e7db33ad948691bbad1cbc1. 
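The "Opened 36cf00619e7db33ad948691bbad1cbc1" entry above reports SteppingSplitPolicy wrapping ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65748305, jitterRate=-0.020273908972740173}. The logged desiredMaxFileSize is consistent with a 64 MB (67108864-byte) base split size scaled by (1 + jitterRate); the snippet below is only a back-of-the-envelope check of that relationship under the assumed 64 MB base, not code taken from HBase.

```java
// Illustration only: relates the logged jitterRate to desiredMaxFileSize,
// assuming the test's base region split size is 64 MB (67108864 bytes).
// Not HBase's ConstantSizeRegionSplitPolicy implementation.
public class JitteredSplitSizeCheck {
    public static void main(String[] args) {
        long assumedBaseSplitSize = 67_108_864L;    // assumed 64 MB base (not printed in the log)
        double jitterRate = -0.020273908972740173;  // from the "Opened 36cf0061..." entry above
        long desired = Math.round(assumedBaseSplitSize * (1.0 + jitterRate));
        System.out.println(desired);                // ~65748305, matching desiredMaxFileSize above
    }
}
```

The second region opened later in this section (desiredMaxFileSize=62822323, jitterRate=-0.06387443840503693) fits the same relation to within rounding, which suggests the jitter is applied multiplicatively to a fixed configured split size.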
2024-11-13T10:25:44,607 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 36cf00619e7db33ad948691bbad1cbc1: Waiting for close lock at 1731493544605Disabling compacts and flushes for region at 1731493544605Disabling writes for close at 1731493544605Writing region close event to WAL at 1731493544607 (+2 ms)Closed at 1731493544607 2024-11-13T10:25:44,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741837_1013 (size=95) 2024-11-13T10:25:44,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741837_1013 (size=95) 2024-11-13T10:25:44,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741837_1013 (size=95) 2024-11-13T10:25:44,644 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-13T10:25:44,644 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-78899845:(num 1731493544501) 2024-11-13T10:25:44,649 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-11-13T10:25:44,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741839_1015 (size=320) 2024-11-13T10:25:44,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741839_1015 (size=320) 2024-11-13T10:25:44,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741839_1015 (size=320) 2024-11-13T10:25:44,678 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-11-13T10:25:44,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741840_1016 (size=253) 2024-11-13T10:25:44,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741840_1016 (size=253) 2024-11-13T10:25:44,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741840_1016 (size=253) 2024-11-13T10:25:44,731 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-1, size=320 (320bytes) 2024-11-13T10:25:44,732 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-13T10:25:44,732 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-13T10:25:44,732 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-1 2024-11-13T10:25:44,741 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-1 after 5ms 2024-11-13T10:25:44,748 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-1: 
isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:25:44,750 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-1 took 20ms 2024-11-13T10:25:44,763 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-1 so closing down 2024-11-13T10:25:44,763 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-13T10:25:44,766 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-11-13T10:25:44,768 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/recovered.edits/0000000000000000001-wal-1.temp 2024-11-13T10:25:44,769 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-13T10:25:44,771 WARN [IPC Server handler 0 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T10:25:44,771 WARN [IPC Server handler 0 on default port 41249 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T10:25:44,771 WARN [IPC Server handler 0 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T10:25:44,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741841_1017 (size=320) 2024-11-13T10:25:44,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741841_1017 (size=320) 2024-11-13T10:25:44,781 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-11-13T10:25:44,784 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/recovered.edits/0000000000000000001-wal-1.temp to 
hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/recovered.edits/0000000000000000002 2024-11-13T10:25:44,790 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 33 ms; skipped=0; WAL=hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-1, size=320, length=320, corrupted=false, cancelled=false 2024-11-13T10:25:44,790 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-1, journal: Splitting hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-1, size=320 (320bytes) at 1731493544731Finishing writing output for hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-1 so closing down at 1731493544763 (+32 ms)Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/recovered.edits/0000000000000000001-wal-1.temp at 1731493544768 (+5 ms)3 split writer threads finished at 1731493544769 (+1 ms)Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1731493544781 (+12 ms)Rename recovered edits hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/recovered.edits/0000000000000000002 at 1731493544784 (+3 ms)Processed 2 edits across 1 Regions in 33 ms; skipped=0; WAL=hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-1, size=320, length=320, corrupted=false, cancelled=false at 1731493544790 (+6 ms) 2024-11-13T10:25:44,807 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-2, size=253 (253bytes) 2024-11-13T10:25:44,807 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-2 2024-11-13T10:25:44,808 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-2 after 1ms 2024-11-13T10:25:44,813 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-2: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:25:44,813 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-2 took 7ms 2024-11-13T10:25:44,816 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-2 so closing down 2024-11-13T10:25:44,816 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-13T10:25:44,819 INFO [Time-limited test-Writer-0 {}] 
monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-11-13T10:25:44,821 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/recovered.edits/0000000000000000002-wal-2.temp 2024-11-13T10:25:44,821 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-13T10:25:44,822 WARN [IPC Server handler 3 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T10:25:44,822 WARN [IPC Server handler 3 on default port 41249 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T10:25:44,823 WARN [IPC Server handler 3 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T10:25:44,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741842_1018 (size=253) 2024-11-13T10:25:44,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741842_1018 (size=253) 2024-11-13T10:25:44,829 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-11-13T10:25:44,833 DEBUG [split-log-closeStream-pool-0 {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:25:44,835 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(184): Found existing old edits file and we have less entries. 
Deleting hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/recovered.edits/0000000000000000002-wal-2.temp, length=253 2024-11-13T10:25:44,837 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 23 ms; skipped=0; WAL=hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-2, size=253, length=253, corrupted=false, cancelled=false 2024-11-13T10:25:44,837 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-2, journal: Splitting hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-2, size=253 (253bytes) at 1731493544807Finishing writing output for hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-2 so closing down at 1731493544816 (+9 ms)Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/recovered.edits/0000000000000000002-wal-2.temp at 1731493544821 (+5 ms)3 split writer threads finished at 1731493544821Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1731493544829 (+8 ms)Processed 1 edits across 1 Regions in 23 ms; skipped=0; WAL=hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal-2, size=253, length=253, corrupted=false, cancelled=false at 1731493544837 (+8 ms) 2024-11-13T10:25:44,837 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-13T10:25:44,840 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:25:44,857 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal.1731493544841, exclude list is [], retry=0 2024-11-13T10:25:44,860 WARN [IPC Server handler 0 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T10:25:44,860 WARN [IPC Server handler 0 on default port 41249 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T10:25:44,860 WARN [IPC Server handler 0 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], 
creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T10:25:44,862 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:25:44,863 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:25:44,870 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal.1731493544841 2024-11-13T10:25:44,870 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:35745:35745)] 2024-11-13T10:25:44,871 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 36cf00619e7db33ad948691bbad1cbc1, NAME => 'testReplayEditsWrittenIntoWAL,,1731493544489.36cf00619e7db33ad948691bbad1cbc1.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:25:44,871 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1731493544489.36cf00619e7db33ad948691bbad1cbc1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:25:44,871 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 36cf00619e7db33ad948691bbad1cbc1 2024-11-13T10:25:44,871 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 36cf00619e7db33ad948691bbad1cbc1 2024-11-13T10:25:44,874 INFO [StoreOpener-36cf00619e7db33ad948691bbad1cbc1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 36cf00619e7db33ad948691bbad1cbc1 2024-11-13T10:25:44,875 INFO [StoreOpener-36cf00619e7db33ad948691bbad1cbc1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 36cf00619e7db33ad948691bbad1cbc1 columnFamilyName a 2024-11-13T10:25:44,875 DEBUG [StoreOpener-36cf00619e7db33ad948691bbad1cbc1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:44,876 INFO [StoreOpener-36cf00619e7db33ad948691bbad1cbc1-1 {}] regionserver.HStore(327): 
Store=36cf00619e7db33ad948691bbad1cbc1/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:44,877 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 36cf00619e7db33ad948691bbad1cbc1 2024-11-13T10:25:44,878 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1 2024-11-13T10:25:44,881 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1 2024-11-13T10:25:44,883 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/recovered.edits/0000000000000000002 2024-11-13T10:25:44,886 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:25:44,894 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/recovered.edits/0000000000000000002 2024-11-13T10:25:44,897 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 36cf00619e7db33ad948691bbad1cbc1 1/1 column families, dataSize=108 B heapSize=512 B 2024-11-13T10:25:44,972 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/.tmp/a/e1fffea6b0094e519c9f33dad23e2ce2 is 58, key is testReplayEditsWrittenIntoWAL/a:1/1731493544644/Put/seqid=0 2024-11-13T10:25:44,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741844_1020 (size=5170) 2024-11-13T10:25:44,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741844_1020 (size=5170) 2024-11-13T10:25:44,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741844_1020 (size=5170) 2024-11-13T10:25:44,993 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/.tmp/a/e1fffea6b0094e519c9f33dad23e2ce2 2024-11-13T10:25:45,058 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/.tmp/a/e1fffea6b0094e519c9f33dad23e2ce2 as hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/a/e1fffea6b0094e519c9f33dad23e2ce2 2024-11-13T10:25:45,070 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/a/e1fffea6b0094e519c9f33dad23e2ce2, entries=2, sequenceid=2, filesize=5.0 K 2024-11-13T10:25:45,078 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 36cf00619e7db33ad948691bbad1cbc1 in 179ms, sequenceid=2, compaction requested=false; wal=null 2024-11-13T10:25:45,080 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/recovered.edits/0000000000000000002 2024-11-13T10:25:45,081 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 36cf00619e7db33ad948691bbad1cbc1 2024-11-13T10:25:45,081 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 36cf00619e7db33ad948691bbad1cbc1 2024-11-13T10:25:45,086 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 36cf00619e7db33ad948691bbad1cbc1 2024-11-13T10:25:45,093 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/36cf00619e7db33ad948691bbad1cbc1/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1 2024-11-13T10:25:45,096 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 36cf00619e7db33ad948691bbad1cbc1; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62822323, jitterRate=-0.06387443840503693}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T10:25:45,097 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 36cf00619e7db33ad948691bbad1cbc1: Writing region info on filesystem at 1731493544871Initializing all the Stores at 1731493544873 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493544873Obtaining lock to block concurrent updates at 1731493544897 (+24 ms)Preparing flush snapshotting stores in 36cf00619e7db33ad948691bbad1cbc1 at 1731493544897Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1731493544489.36cf00619e7db33ad948691bbad1cbc1., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1731493544900 (+3 ms)Flushing stores of testReplayEditsWrittenIntoWAL,,1731493544489.36cf00619e7db33ad948691bbad1cbc1. 
at 1731493544900Flushing 36cf00619e7db33ad948691bbad1cbc1/a: creating writer at 1731493544901 (+1 ms)Flushing 36cf00619e7db33ad948691bbad1cbc1/a: appending metadata at 1731493544960 (+59 ms)Flushing 36cf00619e7db33ad948691bbad1cbc1/a: closing flushed file at 1731493544963 (+3 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65de1a9d: reopening flushed file at 1731493545056 (+93 ms)Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 36cf00619e7db33ad948691bbad1cbc1 in 179ms, sequenceid=2, compaction requested=false; wal=null at 1731493545078 (+22 ms)Cleaning up temporary data from old regions at 1731493545081 (+3 ms)Region opened successfully at 1731493545097 (+16 ms) 2024-11-13T10:25:45,132 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit0 Thread=363 (was 353) Potentially hanging thread: PacketResponder: BP-357896810-172.17.0.2-1731493536919:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:41249/hbase-prefix:default java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:54320 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:54344 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: TestAsyncWALReplay-pool-0 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:54532 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:54242 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:54330 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-357896810-172.17.0.2-1731493536919:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=673 (was 583) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=667 (was 667), ProcessCount=11 (was 11), AvailableMemoryMB=939 (was 967) 2024-11-13T10:25:45,146 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit1 Thread=363, OpenFileDescriptor=673, MaxFileDescriptor=1048576, SystemLoadAverage=667, ProcessCount=11, AvailableMemoryMB=938 2024-11-13T10:25:45,170 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:25:45,173 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:25:45,175 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T10:25:45,179 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-07000176, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/hregion-07000176, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:25:45,195 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-07000176/hregion-07000176.1731493545180, exclude list is [], retry=0 2024-11-13T10:25:45,198 WARN [IPC Server handler 1 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T10:25:45,198 WARN [IPC Server handler 1 on default port 41249 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], 
policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T10:25:45,198 WARN [IPC Server handler 1 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T10:25:45,200 DEBUG [AsyncFSWAL-10-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:25:45,200 DEBUG [AsyncFSWAL-10-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:25:45,203 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-07000176/hregion-07000176.1731493545180 2024-11-13T10:25:45,204 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:45409:45409)] 2024-11-13T10:25:45,204 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => c7555282aea81583f2b8427ceb269c86, NAME => 'testReplayEditsWrittenIntoWAL,,1731493545171.c7555282aea81583f2b8427ceb269c86.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41249/hbase 2024-11-13T10:25:45,208 WARN [IPC Server handler 4 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T10:25:45,208 WARN [IPC Server handler 4 on default port 41249 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T10:25:45,208 WARN [IPC Server handler 4 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, 
storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T10:25:45,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741846_1022 (size=64) 2024-11-13T10:25:45,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741846_1022 (size=64) 2024-11-13T10:25:45,215 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1731493545171.c7555282aea81583f2b8427ceb269c86.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:25:45,217 INFO [StoreOpener-c7555282aea81583f2b8427ceb269c86-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region c7555282aea81583f2b8427ceb269c86 2024-11-13T10:25:45,219 INFO [StoreOpener-c7555282aea81583f2b8427ceb269c86-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c7555282aea81583f2b8427ceb269c86 columnFamilyName a 2024-11-13T10:25:45,219 DEBUG [StoreOpener-c7555282aea81583f2b8427ceb269c86-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:45,220 INFO [StoreOpener-c7555282aea81583f2b8427ceb269c86-1 {}] regionserver.HStore(327): Store=c7555282aea81583f2b8427ceb269c86/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:45,220 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for c7555282aea81583f2b8427ceb269c86 2024-11-13T10:25:45,221 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86 2024-11-13T10:25:45,222 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86 2024-11-13T10:25:45,222 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for c7555282aea81583f2b8427ceb269c86 2024-11-13T10:25:45,222 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for c7555282aea81583f2b8427ceb269c86 2024-11-13T10:25:45,224 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for c7555282aea81583f2b8427ceb269c86 2024-11-13T10:25:45,227 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T10:25:45,228 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened c7555282aea81583f2b8427ceb269c86; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64625742, jitterRate=-0.03700140118598938}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T10:25:45,228 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for c7555282aea81583f2b8427ceb269c86: Writing region info on filesystem at 1731493545215Initializing all the Stores at 1731493545217 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493545217Cleaning up temporary data from old regions at 1731493545222 (+5 ms)Region opened successfully at 1731493545228 (+6 ms) 2024-11-13T10:25:45,228 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing c7555282aea81583f2b8427ceb269c86, disabling compactions & flushes 2024-11-13T10:25:45,228 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1731493545171.c7555282aea81583f2b8427ceb269c86. 2024-11-13T10:25:45,228 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1731493545171.c7555282aea81583f2b8427ceb269c86. 2024-11-13T10:25:45,228 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1731493545171.c7555282aea81583f2b8427ceb269c86. after waiting 0 ms 2024-11-13T10:25:45,228 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1731493545171.c7555282aea81583f2b8427ceb269c86. 2024-11-13T10:25:45,229 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1731493545171.c7555282aea81583f2b8427ceb269c86. 
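Each WAL opened for splitting in this section reports hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ (the AbstractProtobufWALReader(321) entries), and the WALFactory(196) entries show AsyncFSWALProvider in use, which is what TestAsyncWALReplayValueCompression exercises. A minimal configuration sketch matching those observations follows; the compression-related property names are recalled from HBase 2.x and are assumptions to verify against the release under test, not values read from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of a test/client configuration matching what this log reports:
// asyncfs WAL provider plus GZ-compressed WAL values. The value-compression
// property names below are assumptions (recalled, not confirmed by this log).
public final class AsyncWalValueCompressionConfig {
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "asyncfs");                               // -> AsyncFSWALProvider, as logged by WALFactory(196)
        conf.setBoolean("hbase.regionserver.wal.enablecompression", true);       // WAL compression; tag compression defaults on with it (hasTagCompression=true)
        conf.setBoolean("hbase.regionserver.wal.value.enablecompression", true); // assumed name: enables WAL value compression
        conf.set("hbase.regionserver.wal.value.compression.type", "gz");         // assumed name: corresponds to valueCompressionType=GZ in the reader entries
        return conf;
    }

    private AsyncWalValueCompressionConfig() {
    }
}
```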
2024-11-13T10:25:45,229 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for c7555282aea81583f2b8427ceb269c86: Waiting for close lock at 1731493545228Disabling compacts and flushes for region at 1731493545228Disabling writes for close at 1731493545228Writing region close event to WAL at 1731493545229 (+1 ms)Closed at 1731493545229 2024-11-13T10:25:45,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741845_1021 (size=95) 2024-11-13T10:25:45,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741845_1021 (size=95) 2024-11-13T10:25:45,235 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-13T10:25:45,235 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-07000176:(num 1731493545180) 2024-11-13T10:25:45,236 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-11-13T10:25:45,238 WARN [IPC Server handler 2 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T10:25:45,238 WARN [IPC Server handler 2 on default port 41249 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T10:25:45,238 WARN [IPC Server handler 2 on default port 41249 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T10:25:45,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741847_1023 (size=320) 2024-11-13T10:25:45,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741847_1023 (size=320) 2024-11-13T10:25:45,246 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-11-13T10:25:45,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741848_1024 (size=253) 2024-11-13T10:25:45,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741848_1024 (size=253) 2024-11-13T10:25:45,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741848_1024 (size=253) 2024-11-13T10:25:45,684 INFO [Time-limited test {}] 
wal.WALSplitter(299): Splitting hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-2, size=253 (253bytes) 2024-11-13T10:25:45,684 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-2 2024-11-13T10:25:45,685 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-2 after 1ms 2024-11-13T10:25:45,689 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-2: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:25:45,690 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-2 took 6ms 2024-11-13T10:25:45,692 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-2 so closing down 2024-11-13T10:25:45,692 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-13T10:25:45,695 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-11-13T10:25:45,696 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000002-wal-2.temp 2024-11-13T10:25:45,697 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-13T10:25:45,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741849_1025 (size=253) 2024-11-13T10:25:45,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741849_1025 (size=253) 2024-11-13T10:25:45,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741849_1025 (size=253) 2024-11-13T10:25:45,706 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-11-13T10:25:45,708 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000002 2024-11-13T10:25:45,708 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 18 ms; skipped=0; WAL=hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-2, size=253, length=253, corrupted=false, cancelled=false 2024-11-13T10:25:45,708 DEBUG 
[Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-2, journal: Splitting hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-2, size=253 (253bytes) at 1731493545684Finishing writing output for hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-2 so closing down at 1731493545692 (+8 ms)Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000002-wal-2.temp at 1731493545696 (+4 ms)3 split writer threads finished at 1731493545697 (+1 ms)Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1731493545706 (+9 ms)Rename recovered edits hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000002 at 1731493545708 (+2 ms)Processed 1 edits across 1 Regions in 18 ms; skipped=0; WAL=hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-2, size=253, length=253, corrupted=false, cancelled=false at 1731493545708 2024-11-13T10:25:45,732 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-1, size=320 (320bytes) 2024-11-13T10:25:45,732 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-1 2024-11-13T10:25:45,732 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-1 after 0ms 2024-11-13T10:25:45,737 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-1: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:25:45,737 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-1 took 6ms 2024-11-13T10:25:45,740 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-1 so closing down 2024-11-13T10:25:45,740 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-13T10:25:45,742 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-11-13T10:25:45,744 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000001-wal-1.temp 2024-11-13T10:25:45,744 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads 
finished 2024-11-13T10:25:45,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741850_1026 (size=320) 2024-11-13T10:25:45,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741850_1026 (size=320) 2024-11-13T10:25:45,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741850_1026 (size=320) 2024-11-13T10:25:45,753 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-11-13T10:25:45,757 DEBUG [split-log-closeStream-pool-0 {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:25:45,759 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(175): Found existing old edits file. It could be the result of a previous failed split attempt or we have duplicated wal entries. Deleting hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000002, length=253 2024-11-13T10:25:45,761 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000002 2024-11-13T10:25:45,761 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 24 ms; skipped=0; WAL=hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-1, size=320, length=320, corrupted=false, cancelled=false 2024-11-13T10:25:45,761 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-1, journal: Splitting hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-1, size=320 (320bytes) at 1731493545732Finishing writing output for hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-1 so closing down at 1731493545740 (+8 ms)Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000001-wal-1.temp at 1731493545744 (+4 ms)3 split writer threads finished at 1731493545744Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1731493545753 (+9 ms)Rename recovered edits hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000001-wal-1.temp to 
hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000002 at 1731493545761 (+8 ms)Processed 2 edits across 1 Regions in 24 ms; skipped=0; WAL=hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal-1, size=320, length=320, corrupted=false, cancelled=false at 1731493545761 2024-11-13T10:25:45,761 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-13T10:25:45,764 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:25:45,785 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal.1731493545765, exclude list is [], retry=0 2024-11-13T10:25:45,789 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:25:45,789 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:25:45,789 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:25:45,792 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal.1731493545765 2024-11-13T10:25:45,792 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:42913:42913)] 2024-11-13T10:25:45,793 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => c7555282aea81583f2b8427ceb269c86, NAME => 'testReplayEditsWrittenIntoWAL,,1731493545171.c7555282aea81583f2b8427ceb269c86.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:25:45,793 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1731493545171.c7555282aea81583f2b8427ceb269c86.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:25:45,793 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for c7555282aea81583f2b8427ceb269c86 2024-11-13T10:25:45,793 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for c7555282aea81583f2b8427ceb269c86 2024-11-13T10:25:45,795 INFO [StoreOpener-c7555282aea81583f2b8427ceb269c86-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region c7555282aea81583f2b8427ceb269c86 
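[editor's note] The two splits above target the same region: wal-2 is split first and leaves recovered.edits/0000000000000000002 behind, then the split of wal-1 (whose highest sequence id is also 2) finds that file already present, logs the "Found existing old edits file ... Deleting" warning, removes it, and renames its own 0000000000000000001-wal-1.temp into place. The sketch below is a rough, JDK-only illustration of that naming and rename-over-conflict pattern; the 19-digit zero padding and the helper names are assumptions inferred from the file names in the log, not taken from the HBase source.

import java.io.IOException;
import java.nio.file.*;

public class RecoveredEditsNamingSketch {
    // Recovered-edits files in the log are named after the highest sequence id,
    // zero-padded to 19 digits (width inferred from "0000000000000000002" above).
    static String recoveredEditsName(long maxSeqId) {
        return String.format("%019d", maxSeqId);
    }

    // Commit a finished ".temp" writer output to its final name, replacing any file
    // left by an earlier split of a WAL that covered the same sequence ids --
    // the situation the "Found existing old edits file" warning describes.
    static void commitTempEdits(Path tempFile, Path regionEditsDir, long maxSeqId) throws IOException {
        Path finalName = regionEditsDir.resolve(recoveredEditsName(maxSeqId));
        if (Files.exists(finalName)) {
            System.out.println("Found existing edits file, deleting: " + finalName);
            Files.delete(finalName);
        }
        Files.move(tempFile, finalName, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path dir = Files.createTempDirectory("recovered.edits");
        // Simulate the earlier split of wal-2 having already written seqid 2,
        // then the wal-1 split committing its temp file for the same seqid.
        Files.createFile(dir.resolve(recoveredEditsName(2)));
        Path temp = Files.createFile(dir.resolve("0000000000000000001-wal-1.temp"));
        commitTempEdits(temp, dir, 2);
        try (var remaining = Files.list(dir)) {
            System.out.println(remaining.count() + " file(s) remain"); // 1
        }
    }
}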
2024-11-13T10:25:45,797 INFO [StoreOpener-c7555282aea81583f2b8427ceb269c86-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c7555282aea81583f2b8427ceb269c86 columnFamilyName a 2024-11-13T10:25:45,797 DEBUG [StoreOpener-c7555282aea81583f2b8427ceb269c86-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:45,798 INFO [StoreOpener-c7555282aea81583f2b8427ceb269c86-1 {}] regionserver.HStore(327): Store=c7555282aea81583f2b8427ceb269c86/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:45,798 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for c7555282aea81583f2b8427ceb269c86 2024-11-13T10:25:45,799 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86 2024-11-13T10:25:45,801 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86 2024-11-13T10:25:45,802 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000002 2024-11-13T10:25:45,805 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:25:45,807 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000002 2024-11-13T10:25:45,807 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing c7555282aea81583f2b8427ceb269c86 1/1 column families, dataSize=108 B heapSize=512 B 2024-11-13T10:25:45,824 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/.tmp/a/c1da87eae627427eb8b86f8a45d89d6b is 58, key is testReplayEditsWrittenIntoWAL/a:1/1731493545235/Put/seqid=0 2024-11-13T10:25:45,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741852_1028 (size=5170) 2024-11-13T10:25:45,893 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741852_1028 (size=5170) 2024-11-13T10:25:45,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741852_1028 (size=5170) 2024-11-13T10:25:45,905 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/.tmp/a/c1da87eae627427eb8b86f8a45d89d6b 2024-11-13T10:25:45,922 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/.tmp/a/c1da87eae627427eb8b86f8a45d89d6b as hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/a/c1da87eae627427eb8b86f8a45d89d6b 2024-11-13T10:25:45,935 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/a/c1da87eae627427eb8b86f8a45d89d6b, entries=2, sequenceid=2, filesize=5.0 K 2024-11-13T10:25:45,935 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for c7555282aea81583f2b8427ceb269c86 in 128ms, sequenceid=2, compaction requested=false; wal=null 2024-11-13T10:25:45,936 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/0000000000000000002 2024-11-13T10:25:45,937 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for c7555282aea81583f2b8427ceb269c86 2024-11-13T10:25:45,937 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for c7555282aea81583f2b8427ceb269c86 2024-11-13T10:25:45,941 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for c7555282aea81583f2b8427ceb269c86 2024-11-13T10:25:45,944 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/c7555282aea81583f2b8427ceb269c86/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1 2024-11-13T10:25:45,946 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened c7555282aea81583f2b8427ceb269c86; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70749255, jitterRate=0.05424605309963226}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T10:25:45,946 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for c7555282aea81583f2b8427ceb269c86: Writing region info on filesystem at 1731493545793Initializing all the Stores at 1731493545795 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493545795Obtaining lock to block concurrent updates at 1731493545807 (+12 ms)Preparing flush snapshotting stores in c7555282aea81583f2b8427ceb269c86 
at 1731493545807Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1731493545171.c7555282aea81583f2b8427ceb269c86., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1731493545807Flushing stores of testReplayEditsWrittenIntoWAL,,1731493545171.c7555282aea81583f2b8427ceb269c86. at 1731493545807Flushing c7555282aea81583f2b8427ceb269c86/a: creating writer at 1731493545807Flushing c7555282aea81583f2b8427ceb269c86/a: appending metadata at 1731493545823 (+16 ms)Flushing c7555282aea81583f2b8427ceb269c86/a: closing flushed file at 1731493545823Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@21ad76b3: reopening flushed file at 1731493545917 (+94 ms)Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for c7555282aea81583f2b8427ceb269c86 in 128ms, sequenceid=2, compaction requested=false; wal=null at 1731493545935 (+18 ms)Cleaning up temporary data from old regions at 1731493545937 (+2 ms)Region opened successfully at 1731493545946 (+9 ms) 2024-11-13T10:25:45,972 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit1 Thread=372 (was 363) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:54420 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-357896810-172.17.0.2-1731493536919:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:54532 [Waiting for operation #20] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:54242 [Waiting for operation #10] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-357896810-172.17.0.2-1731493536919:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-357896810-172.17.0.2-1731493536919:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:54362 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:54654 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:54330 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=755 (was 673) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=667 (was 667), ProcessCount=11 (was 11), AvailableMemoryMB=926 (was 938) 2024-11-13T10:25:45,987 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenIntoWAL Thread=372, OpenFileDescriptor=755, MaxFileDescriptor=1048576, SystemLoadAverage=667, ProcessCount=11, AvailableMemoryMB=925 2024-11-13T10:25:46,012 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:25:46,015 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:25:46,017 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T10:25:46,021 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-37805738, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/hregion-37805738, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:25:46,039 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-37805738/hregion-37805738.1731493546022, exclude list is [], retry=0 2024-11-13T10:25:46,044 DEBUG [AsyncFSWAL-12-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:25:46,044 DEBUG [AsyncFSWAL-12-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:25:46,047 DEBUG [AsyncFSWAL-12-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:25:46,057 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL 
/hbase/WALs/hregion-37805738/hregion-37805738.1731493546022 2024-11-13T10:25:46,058 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:42913:42913)] 2024-11-13T10:25:46,058 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => f9782aadfdfb3b7556412d775418def4, NAME => 'testReplayEditsWrittenIntoWAL,,1731493546013.f9782aadfdfb3b7556412d775418def4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41249/hbase 2024-11-13T10:25:46,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741854_1030 (size=64) 2024-11-13T10:25:46,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741854_1030 (size=64) 2024-11-13T10:25:46,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741854_1030 (size=64) 2024-11-13T10:25:46,082 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1731493546013.f9782aadfdfb3b7556412d775418def4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:25:46,084 INFO [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region f9782aadfdfb3b7556412d775418def4 2024-11-13T10:25:46,087 INFO [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f9782aadfdfb3b7556412d775418def4 columnFamilyName a 
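[editor's note] The "WAL configuration" lines above pair a 256 MB block size with a 128 MB roll size and maxLogs=32, i.e. the writer is asked to roll at roughly half a block. A throwaway sketch of that arithmetic follows; the 0.5 multiplier is inferred purely from the two numbers printed in the log, not read from any HBase configuration key.

public class WalRollMathSketch {
    static final long MB = 1024L * 1024L;
    static final long BLOCK_SIZE = 256 * MB;    // blocksize=256 MB, as logged
    static final double ROLL_MULTIPLIER = 0.5;  // assumed: rollsize / blocksize = 128 / 256

    // A WAL writer is considered due for a roll once its length crosses the roll
    // size, well before it would reach the underlying block size.
    static boolean shouldRequestRoll(long walLengthBytes) {
        long rollSize = (long) (BLOCK_SIZE * ROLL_MULTIPLIER); // 128 MB, matching the log
        return walLengthBytes >= rollSize;
    }

    public static void main(String[] args) {
        System.out.println(shouldRequestRoll(100 * MB)); // false
        System.out.println(shouldRequestRoll(130 * MB)); // true
    }
}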
2024-11-13T10:25:46,087 DEBUG [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:46,088 INFO [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] regionserver.HStore(327): Store=f9782aadfdfb3b7556412d775418def4/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:46,088 INFO [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region f9782aadfdfb3b7556412d775418def4 2024-11-13T10:25:46,094 INFO [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f9782aadfdfb3b7556412d775418def4 columnFamilyName b 2024-11-13T10:25:46,094 DEBUG [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:46,094 INFO [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] regionserver.HStore(327): Store=f9782aadfdfb3b7556412d775418def4/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:46,094 INFO [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region f9782aadfdfb3b7556412d775418def4 2024-11-13T10:25:46,096 INFO [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f9782aadfdfb3b7556412d775418def4 columnFamilyName c 2024-11-13T10:25:46,096 DEBUG [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:46,097 INFO [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] regionserver.HStore(327): Store=f9782aadfdfb3b7556412d775418def4/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:46,097 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for f9782aadfdfb3b7556412d775418def4 2024-11-13T10:25:46,098 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4 2024-11-13T10:25:46,098 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4 2024-11-13T10:25:46,100 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for f9782aadfdfb3b7556412d775418def4 2024-11-13T10:25:46,100 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for f9782aadfdfb3b7556412d775418def4 2024-11-13T10:25:46,101 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-13T10:25:46,102 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for f9782aadfdfb3b7556412d775418def4 2024-11-13T10:25:46,105 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T10:25:46,106 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened f9782aadfdfb3b7556412d775418def4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73635679, jitterRate=0.09725712239742279}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-13T10:25:46,106 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for f9782aadfdfb3b7556412d775418def4: Writing region info on filesystem at 1731493546082Initializing all the Stores at 1731493546084 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493546084Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493546084Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE 
=> '65536 B (64KB)'} at 1731493546084Cleaning up temporary data from old regions at 1731493546100 (+16 ms)Region opened successfully at 1731493546106 (+6 ms) 2024-11-13T10:25:46,106 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing f9782aadfdfb3b7556412d775418def4, disabling compactions & flushes 2024-11-13T10:25:46,106 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1731493546013.f9782aadfdfb3b7556412d775418def4. 2024-11-13T10:25:46,106 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1731493546013.f9782aadfdfb3b7556412d775418def4. 2024-11-13T10:25:46,107 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1731493546013.f9782aadfdfb3b7556412d775418def4. after waiting 0 ms 2024-11-13T10:25:46,107 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1731493546013.f9782aadfdfb3b7556412d775418def4. 2024-11-13T10:25:46,107 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1731493546013.f9782aadfdfb3b7556412d775418def4. 2024-11-13T10:25:46,107 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for f9782aadfdfb3b7556412d775418def4: Waiting for close lock at 1731493546106Disabling compacts and flushes for region at 1731493546106Disabling writes for close at 1731493546107 (+1 ms)Writing region close event to WAL at 1731493546107Closed at 1731493546107 2024-11-13T10:25:46,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741853_1029 (size=95) 2024-11-13T10:25:46,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741853_1029 (size=95) 2024-11-13T10:25:46,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741853_1029 (size=95) 2024-11-13T10:25:46,116 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-13T10:25:46,116 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-37805738:(num 1731493546022) 2024-11-13T10:25:46,116 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-13T10:25:46,119 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:25:46,133 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493546120, exclude list is [], retry=0 2024-11-13T10:25:46,137 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:25:46,138 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:25:46,138 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:25:46,143 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493546120 2024-11-13T10:25:46,147 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:45409:45409)] 2024-11-13T10:25:46,429 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493546120, size=0 (0bytes) 2024-11-13T10:25:46,429 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493546120 might be still open, length is 0 2024-11-13T10:25:46,429 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493546120 2024-11-13T10:25:46,430 WARN [IPC Server handler 2 on default port 41249 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493546120 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741855_1031 2024-11-13T10:25:46,431 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493546120 after 2ms 2024-11-13T10:25:48,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741832_1008 (size=32) 2024-11-13T10:25:48,276 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:54690 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:45097:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54690 dst: /127.0.0.1:45097 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:45097 remote=/127.0.0.1:54690]. Total timeout mills is 60000, 58100 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:25:48,277 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:54400 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:44787:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54400 dst: /127.0.0.1:44787 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:25:48,278 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:54462 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:38649:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54462 dst: /127.0.0.1:38649 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:25:48,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741855_1032 (size=263633) 2024-11-13T10:25:48,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741855_1032 (size=263633) 2024-11-13T10:25:48,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741855_1032 (size=263633) 2024-11-13T10:25:49,507 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-13T10:25:49,567 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T10:25:50,432 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493546120 after 4003ms 2024-11-13T10:25:50,436 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493546120: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:25:50,438 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493546120 took 4010ms 2024-11-13T10:25:50,442 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1731493546120.temp 2024-11-13T10:25:50,458 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/recovered.edits/0000000000000000001-wal.1731493546120.temp 2024-11-13T10:25:50,585 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493546120; continuing. 
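
The lease-recovery exchange above (attempt=0 fails after 2 ms, attempt=1 succeeds roughly 4 s later, once the write pipeline for blk_1073741855 has been torn down and the block finalized) follows the usual HDFS pattern: ask the NameNode to recover the lease, then retry until the file is reported closed. A minimal stand-alone sketch of that pattern against the public DistributedFileSystem API is shown below; the path, timeout and sleep interval are illustrative and not taken from RecoverLeaseFSUtils, which adds its own backoff and logging.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      // Illustrative only: ask the NameNode to recover the lease on a WAL that may
      // still be open, retrying until recoverLease() reports the file as closed.
      static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
          throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        for (int attempt = 0; System.currentTimeMillis() < deadline; attempt++) {
          if (dfs.recoverLease(wal)) {   // true => lease released, file closed
            System.out.println("Recovered lease, attempt=" + attempt + " on " + wal);
            return true;
          }
          System.out.println("Failed to recover lease, attempt=" + attempt + " on " + wal);
          Thread.sleep(4_000L);          // the log above saw success on the retry ~4 s later
        }
        return false;
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical NameNode address and WAL path, shaped like the ones in the log.
        Path wal = new Path("hdfs://localhost:41249/hbase/WALs/example/wal.0000000000000001");
        DistributedFileSystem dfs = (DistributedFileSystem) wal.getFileSystem(conf);
        recoverLease(dfs, wal, 60_000L);
      }
    }
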
2024-11-13T10:25:50,585 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493546120 so closing down 2024-11-13T10:25:50,585 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-13T10:25:50,586 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-13T10:25:50,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741856_1033 (size=263641) 2024-11-13T10:25:50,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741856_1033 (size=263641) 2024-11-13T10:25:50,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741856_1033 (size=263641) 2024-11-13T10:25:50,596 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/recovered.edits/0000000000000000001-wal.1731493546120.temp (wrote 3002 edits, skipped 0 edits in 73 ms) 2024-11-13T10:25:50,598 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/recovered.edits/0000000000000000001-wal.1731493546120.temp to hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/recovered.edits/0000000000000003002 2024-11-13T10:25:50,599 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3002 edits across 1 Regions in 160 ms; skipped=0; WAL=hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493546120, size=0, length=0, corrupted=false, cancelled=false 2024-11-13T10:25:50,599 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493546120, journal: Splitting hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493546120, size=0 (0bytes) at 1731493546429Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/recovered.edits/0000000000000000001-wal.1731493546120.temp at 1731493550459 (+4030 ms)Split 1024 edits, skipped 0 edits. at 1731493550522 (+63 ms)Split 2048 edits, skipped 0 edits. 
at 1731493550555 (+33 ms)Finishing writing output for hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493546120 so closing down at 1731493550585 (+30 ms)3 split writer threads finished at 1731493550586 (+1 ms)Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/recovered.edits/0000000000000000001-wal.1731493546120.temp (wrote 3002 edits, skipped 0 edits in 73 ms) at 1731493550596 (+10 ms)Rename recovered edits hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/recovered.edits/0000000000000000001-wal.1731493546120.temp to hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/recovered.edits/0000000000000003002 at 1731493550598 (+2 ms)Processed 3002 edits across 1 Regions in 160 ms; skipped=0; WAL=hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493546120, size=0, length=0, corrupted=false, cancelled=false at 1731493550599 (+1 ms) 2024-11-13T10:25:50,602 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493546120 to hdfs://localhost:41249/hbase/oldWALs/wal.1731493546120 2024-11-13T10:25:50,603 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/recovered.edits/0000000000000003002 2024-11-13T10:25:50,603 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-13T10:25:50,605 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:25:50,620 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493550606, exclude list is [], retry=0 2024-11-13T10:25:50,625 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:25:50,625 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:25:50,626 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:25:50,633 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493550606 2024-11-13T10:25:50,633 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:35745:35745)] 2024-11-13T10:25:50,634 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1731493546013.f9782aadfdfb3b7556412d775418def4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:25:50,636 INFO [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region f9782aadfdfb3b7556412d775418def4 2024-11-13T10:25:50,638 INFO [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f9782aadfdfb3b7556412d775418def4 columnFamilyName a 2024-11-13T10:25:50,638 DEBUG [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:50,639 INFO [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] regionserver.HStore(327): Store=f9782aadfdfb3b7556412d775418def4/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:50,639 INFO [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region f9782aadfdfb3b7556412d775418def4 2024-11-13T10:25:50,640 INFO [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f9782aadfdfb3b7556412d775418def4 columnFamilyName b 2024-11-13T10:25:50,640 DEBUG [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:50,641 INFO 
[StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] regionserver.HStore(327): Store=f9782aadfdfb3b7556412d775418def4/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:50,641 INFO [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region f9782aadfdfb3b7556412d775418def4 2024-11-13T10:25:50,642 INFO [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f9782aadfdfb3b7556412d775418def4 columnFamilyName c 2024-11-13T10:25:50,642 DEBUG [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:50,643 INFO [StoreOpener-f9782aadfdfb3b7556412d775418def4-1 {}] regionserver.HStore(327): Store=f9782aadfdfb3b7556412d775418def4/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:50,643 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for f9782aadfdfb3b7556412d775418def4 2024-11-13T10:25:50,644 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4 2024-11-13T10:25:50,646 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4 2024-11-13T10:25:50,647 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/recovered.edits/0000000000000003002 2024-11-13T10:25:50,651 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/recovered.edits/0000000000000003002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:25:50,703 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-11-13T10:25:51,063 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T10:25:51,063 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-13T10:25:51,065 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T10:25:51,065 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-13T10:25:51,065 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T10:25:51,065 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-13T10:25:51,066 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL 2024-11-13T10:25:51,066 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL Metrics about Tables on a single HBase RegionServer 2024-11-13T10:25:51,153 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing f9782aadfdfb3b7556412d775418def4 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-11-13T10:25:51,204 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/.tmp/a/b6344d7a13fa48a48fdbe60f9b80e2b6 is 62, key is testReplayEditsWrittenIntoWAL/a:100/1731493546158/Put/seqid=0 2024-11-13T10:25:51,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741858_1035 (size=50463) 2024-11-13T10:25:51,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741858_1035 (size=50463) 2024-11-13T10:25:51,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741858_1035 (size=50463) 2024-11-13T10:25:51,220 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=754 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/.tmp/a/b6344d7a13fa48a48fdbe60f9b80e2b6 2024-11-13T10:25:51,229 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/.tmp/a/b6344d7a13fa48a48fdbe60f9b80e2b6 as hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/a/b6344d7a13fa48a48fdbe60f9b80e2b6 2024-11-13T10:25:51,238 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/a/b6344d7a13fa48a48fdbe60f9b80e2b6, entries=754, sequenceid=754, filesize=49.3 K 2024-11-13T10:25:51,238 INFO [Time-limited test {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.59 KB/101984, currentSize=0 B/0 for f9782aadfdfb3b7556412d775418def4 in 85ms, sequenceid=754, compaction requested=false; wal=null 2024-11-13T10:25:51,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741828_1004 (size=1189) 2024-11-13T10:25:51,265 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-11-13T10:25:51,266 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing f9782aadfdfb3b7556412d775418def4 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-11-13T10:25:51,276 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/.tmp/a/6c1303eee811422cb6b519c9a63a2a97 is 62, key is testReplayEditsWrittenIntoWAL/a:754/1731493546224/Put/seqid=0 2024-11-13T10:25:51,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741859_1036 (size=20072) 2024-11-13T10:25:51,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741859_1036 (size=20072) 2024-11-13T10:25:51,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741859_1036 (size=20072) 2024-11-13T10:25:51,293 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.93 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/.tmp/a/6c1303eee811422cb6b519c9a63a2a97 2024-11-13T10:25:51,327 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/.tmp/b/a29484b310c04d6893c9c3103cc8a919 is 62, key is testReplayEditsWrittenIntoWAL/b:100/1731493546249/Put/seqid=0 2024-11-13T10:25:51,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741860_1037 (size=35835) 2024-11-13T10:25:51,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741860_1037 (size=35835) 2024-11-13T10:25:51,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741860_1037 (size=35835) 2024-11-13T10:25:51,339 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=28.56 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/.tmp/b/a29484b310c04d6893c9c3103cc8a919 2024-11-13T10:25:51,348 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/.tmp/a/6c1303eee811422cb6b519c9a63a2a97 as hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/a/6c1303eee811422cb6b519c9a63a2a97 2024-11-13T10:25:51,356 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/a/6c1303eee811422cb6b519c9a63a2a97, entries=246, sequenceid=1508, filesize=19.6 K 2024-11-13T10:25:51,357 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/.tmp/b/a29484b310c04d6893c9c3103cc8a919 as hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/b/a29484b310c04d6893c9c3103cc8a919 2024-11-13T10:25:51,364 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/b/a29484b310c04d6893c9c3103cc8a919, entries=508, sequenceid=1508, filesize=35.0 K 2024-11-13T10:25:51,365 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for f9782aadfdfb3b7556412d775418def4 in 98ms, sequenceid=1508, compaction requested=false; wal=null 2024-11-13T10:25:51,382 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-11-13T10:25:51,382 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing f9782aadfdfb3b7556412d775418def4 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-11-13T10:25:51,392 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/.tmp/b/4baf520b36c946feb44cb375018136dc is 62, key is testReplayEditsWrittenIntoWAL/b:508/1731493546293/Put/seqid=0 2024-11-13T10:25:51,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741861_1038 (size=35082) 2024-11-13T10:25:51,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741861_1038 (size=35082) 2024-11-13T10:25:51,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741861_1038 (size=35082) 2024-11-13T10:25:51,402 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=27.87 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/.tmp/b/4baf520b36c946feb44cb375018136dc 2024-11-13T10:25:51,431 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/.tmp/c/f35d385294d14f9da881b5f3fbcd1b50 is 62, key is testReplayEditsWrittenIntoWAL/c:100/1731493546333/Put/seqid=0 2024-11-13T10:25:51,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741862_1039 (size=20825) 2024-11-13T10:25:51,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741862_1039 (size=20825) 2024-11-13T10:25:51,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741862_1039 (size=20825) 2024-11-13T10:25:51,441 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): 
Flushed memstore data size=14.63 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/.tmp/c/f35d385294d14f9da881b5f3fbcd1b50 2024-11-13T10:25:51,450 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/.tmp/b/4baf520b36c946feb44cb375018136dc as hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/b/4baf520b36c946feb44cb375018136dc 2024-11-13T10:25:51,457 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/b/4baf520b36c946feb44cb375018136dc, entries=492, sequenceid=2262, filesize=34.3 K 2024-11-13T10:25:51,459 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/.tmp/c/f35d385294d14f9da881b5f3fbcd1b50 as hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/c/f35d385294d14f9da881b5f3fbcd1b50 2024-11-13T10:25:51,465 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/c/f35d385294d14f9da881b5f3fbcd1b50, entries=262, sequenceid=2262, filesize=20.3 K 2024-11-13T10:25:51,465 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for f9782aadfdfb3b7556412d775418def4 in 83ms, sequenceid=2262, compaction requested=false; wal=null 2024-11-13T10:25:51,475 WARN [Time-limited test {}] regionserver.HRegion(5722): No family for cell testReplayEditsWrittenIntoWAL/another family:testReplayEditsWrittenIntoWAL/1731493546374/Put/vlen=29/seqid=0 in region testReplayEditsWrittenIntoWAL,,1731493546013.f9782aadfdfb3b7556412d775418def4. 
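
The WARN just above records the one edit whose column family ('another family') is not declared on this table, and the "Applied 3001, skipped 1" summary that follows counts it as skipped. The check itself is plain set membership against the table's declared families; a hypothetical sketch of that accounting (record type and names invented for illustration):

    import java.util.List;
    import java.util.Set;

    final class ReplayFilterSketch {
      // Hypothetical edit: only the column family matters for this check.
      record Edit(String family, byte[] qualifier, byte[] value) {}

      // Returns {applied, skipped}, mirroring the replay accounting in the log.
      static long[] applyOrSkip(List<Edit> edits, Set<String> declaredFamilies) {
        long applied = 0, skipped = 0;
        for (Edit e : edits) {
          if (declaredFamilies.contains(e.family())) {
            applied++;   // would go to that family's memstore
          } else {
            skipped++;   // e.g. family "another family" is not in {a, b, c}
          }
        }
        return new long[] { applied, skipped };
      }
    }
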
2024-11-13T10:25:51,478 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3001, skipped 1, firstSequenceIdInLog=1, maxSequenceIdInLog=3002, path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/recovered.edits/0000000000000003002 2024-11-13T10:25:51,479 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-11-13T10:25:51,479 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing f9782aadfdfb3b7556412d775418def4 3/3 column families, dataSize=41.85 KB heapSize=98.89 KB 2024-11-13T10:25:51,489 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/.tmp/c/263ca41cfcb04e9b96063396e8dd6ade is 62, key is testReplayEditsWrittenIntoWAL/c:262/1731493546341/Put/seqid=0 2024-11-13T10:25:51,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741863_1040 (size=50301) 2024-11-13T10:25:51,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741863_1040 (size=50301) 2024-11-13T10:25:51,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741863_1040 (size=50301) 2024-11-13T10:25:51,509 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=41.85 KB at sequenceid=3002 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/.tmp/c/263ca41cfcb04e9b96063396e8dd6ade 2024-11-13T10:25:51,517 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 263ca41cfcb04e9b96063396e8dd6ade 2024-11-13T10:25:51,518 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/.tmp/c/263ca41cfcb04e9b96063396e8dd6ade as hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/c/263ca41cfcb04e9b96063396e8dd6ade 2024-11-13T10:25:51,527 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 263ca41cfcb04e9b96063396e8dd6ade 2024-11-13T10:25:51,527 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/c/263ca41cfcb04e9b96063396e8dd6ade, entries=739, sequenceid=3002, filesize=49.1 K 2024-11-13T10:25:51,528 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~41.85 KB/42854, heapSize ~98.38 KB/100736, currentSize=0 B/0 for f9782aadfdfb3b7556412d775418def4 in 48ms, sequenceid=3002, compaction requested=false; wal=null 2024-11-13T10:25:51,529 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/recovered.edits/0000000000000003002 2024-11-13T10:25:51,530 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for f9782aadfdfb3b7556412d775418def4 2024-11-13T10:25:51,530 DEBUG [Time-limited test {}] regionserver.HRegion(1060): 
Cleaning up temporary data for f9782aadfdfb3b7556412d775418def4 2024-11-13T10:25:51,531 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T10:25:51,533 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for f9782aadfdfb3b7556412d775418def4 2024-11-13T10:25:51,536 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenIntoWAL/f9782aadfdfb3b7556412d775418def4/recovered.edits/3002.seqid, newMaxSeqId=3002, maxSeqId=1 2024-11-13T10:25:51,537 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened f9782aadfdfb3b7556412d775418def4; next sequenceid=3003; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=204800, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62806944, jitterRate=-0.06410360336303711}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T10:25:51,537 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for f9782aadfdfb3b7556412d775418def4: Writing region info on filesystem at 1731493550634Initializing all the Stores at 1731493550635 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493550635Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493550636 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493550636Cleaning up temporary data from old regions at 1731493551530 (+894 ms)Region opened successfully at 1731493551537 (+7 ms) 2024-11-13T10:25:51,590 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing f9782aadfdfb3b7556412d775418def4, disabling compactions & flushes 2024-11-13T10:25:51,590 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1731493546013.f9782aadfdfb3b7556412d775418def4. 2024-11-13T10:25:51,590 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1731493546013.f9782aadfdfb3b7556412d775418def4. 2024-11-13T10:25:51,590 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1731493546013.f9782aadfdfb3b7556412d775418def4. after waiting 0 ms 2024-11-13T10:25:51,591 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1731493546013.f9782aadfdfb3b7556412d775418def4. 
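
The recovered-edits file replayed above is named for the highest sequence id it contains (0000000000000003002), which is why the region reopens at next sequenceid=3003 and a 3002.seqid marker is written. Assuming only that naming convention, a small sketch with the plain Hadoop FileSystem API shows how the maximum replayable sequence id can be derived from the directory listing; the helper name is made up for illustration.

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class RecoveredEditsSketch {
      // Scan <regionDir>/recovered.edits and return the largest sequence id encoded
      // in a file name, or -1 if there is nothing to replay.
      static long maxRecoveredSeqId(FileSystem fs, Path regionDir) throws Exception {
        Path editsDir = new Path(regionDir, "recovered.edits");
        if (!fs.exists(editsDir)) {
          return -1L;
        }
        long max = -1L;
        for (FileStatus st : fs.listStatus(editsDir)) {
          String name = st.getPath().getName();   // e.g. 0000000000000003002
          int dash = name.indexOf('-');           // split leftovers look like 0...01-wal.<ts>.temp
          String digits = dash >= 0 ? name.substring(0, dash) : name;
          try {
            max = Math.max(max, Long.parseLong(digits));
          } catch (NumberFormatException e) {
            // not an edits file, e.g. the 3002.seqid marker
          }
        }
        return max;
      }
    }

For the directory shown in the log, this would return 3002, matching maxSequenceIdInLog=3002 and newMaxSeqId=3002 above.
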
2024-11-13T10:25:51,593 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1731493546013.f9782aadfdfb3b7556412d775418def4. 2024-11-13T10:25:51,593 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for f9782aadfdfb3b7556412d775418def4: Waiting for close lock at 1731493551590Disabling compacts and flushes for region at 1731493551590Disabling writes for close at 1731493551590Writing region close event to WAL at 1731493551593 (+3 ms)Closed at 1731493551593 2024-11-13T10:25:51,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741857_1034 (size=95) 2024-11-13T10:25:51,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741857_1034 (size=95) 2024-11-13T10:25:51,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741857_1034 (size=95) 2024-11-13T10:25:51,600 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-13T10:25:51,600 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1731493550606) 2024-11-13T10:25:51,616 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenIntoWAL Thread=390 (was 372) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43163 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1108307321_22 at /127.0.0.1:56622 [Waiting for operation #10] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) 
java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1108307321_22 at /127.0.0.1:56594 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@240362d6[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@60db171c[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1396088596) connection to localhost/127.0.0.1:35595 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1108307321_22 at /127.0.0.1:54714 [Waiting for operation #17] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1396088596) connection to localhost/127.0.0.1:43163 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: AsyncFSWAL-12-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.replay.wal.secondtime@localhost:41249 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35595 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1396088596) connection to localhost/127.0.0.1:41249 from jenkins.replay.wal.secondtime java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1108307321_22 at /127.0.0.1:47690 [Waiting for operation #18] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=835 (was 755) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=629 (was 667), ProcessCount=11 (was 11), AvailableMemoryMB=837 (was 925) 2024-11-13T10:25:51,632 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#test2727 Thread=390, OpenFileDescriptor=835, MaxFileDescriptor=1048576, SystemLoadAverage=629, ProcessCount=11, AvailableMemoryMB=836 2024-11-13T10:25:51,650 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:25:51,652 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:25:51,653 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T10:25:51,656 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-79516391, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/hregion-79516391, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:25:51,671 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-79516391/hregion-79516391.1731493551656, exclude list is [], retry=0 2024-11-13T10:25:51,674 DEBUG [AsyncFSWAL-14-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:25:51,674 DEBUG [AsyncFSWAL-14-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:25:51,675 DEBUG [AsyncFSWAL-14-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:25:51,678 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-79516391/hregion-79516391.1731493551656 2024-11-13T10:25:51,679 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:35745:35745)] 2024-11-13T10:25:51,679 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 2a466bdb7d2ec60c37de266733591164, NAME => 
'test2727,,1731493551651.2a466bdb7d2ec60c37de266733591164.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test2727', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41249/hbase 2024-11-13T10:25:51,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741865_1042 (size=43) 2024-11-13T10:25:51,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741865_1042 (size=43) 2024-11-13T10:25:51,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741865_1042 (size=43) 2024-11-13T10:25:51,692 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1731493551651.2a466bdb7d2ec60c37de266733591164.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:25:51,694 INFO [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 2a466bdb7d2ec60c37de266733591164 2024-11-13T10:25:51,696 INFO [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2a466bdb7d2ec60c37de266733591164 columnFamilyName a 2024-11-13T10:25:51,696 DEBUG [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:51,696 INFO [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] regionserver.HStore(327): Store=2a466bdb7d2ec60c37de266733591164/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=NONE, compression=NONE 2024-11-13T10:25:51,697 INFO [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 2a466bdb7d2ec60c37de266733591164 2024-11-13T10:25:51,698 INFO [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2a466bdb7d2ec60c37de266733591164 columnFamilyName b 2024-11-13T10:25:51,699 DEBUG [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:51,699 INFO [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] regionserver.HStore(327): Store=2a466bdb7d2ec60c37de266733591164/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:51,699 INFO [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 2a466bdb7d2ec60c37de266733591164 2024-11-13T10:25:51,701 INFO [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2a466bdb7d2ec60c37de266733591164 columnFamilyName c 2024-11-13T10:25:51,701 DEBUG [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:51,701 INFO [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] regionserver.HStore(327): Store=2a466bdb7d2ec60c37de266733591164/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:51,701 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 2a466bdb7d2ec60c37de266733591164 
2024-11-13T10:25:51,702 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164 2024-11-13T10:25:51,702 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164 2024-11-13T10:25:51,704 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 2a466bdb7d2ec60c37de266733591164 2024-11-13T10:25:51,704 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 2a466bdb7d2ec60c37de266733591164 2024-11-13T10:25:51,704 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-13T10:25:51,706 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 2a466bdb7d2ec60c37de266733591164 2024-11-13T10:25:51,709 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T10:25:51,709 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 2a466bdb7d2ec60c37de266733591164; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73232734, jitterRate=0.09125277400016785}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-13T10:25:51,711 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 2a466bdb7d2ec60c37de266733591164: Writing region info on filesystem at 1731493551692Initializing all the Stores at 1731493551694 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493551694Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493551694Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493551694Cleaning up temporary data from old regions at 1731493551704 (+10 ms)Region opened successfully at 1731493551711 (+7 ms) 2024-11-13T10:25:51,711 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 2a466bdb7d2ec60c37de266733591164, disabling compactions & flushes 2024-11-13T10:25:51,711 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1731493551651.2a466bdb7d2ec60c37de266733591164. 
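The events above show the first open of region 2a466bdb7d2ec60c37de266733591164: zero recovered edits are found, so only a marker file, recovered.edits/1.seqid (newMaxSeqId=1), is written before the region opens at next sequenceid=2. Later in this log the split output uses the same directory: a temp file prefixed with the 19-digit zero-padded first sequence id (0000000000000000001-wal.1731493551722.temp) is renamed to a file named by the highest sequence id it holds (0000000000000003000). The snippet below is only a minimal sketch of that naming convention as it appears in this log; the class and method names are hypothetical, not HBase's implementation.

import java.util.Locale;

/** Hypothetical sketch of the recovered-edits file names visible in the log above. */
public class RecoveredEditsNames {
  /** Temp split output starts with the first sequence id, zero-padded to 19 digits. */
  static String tempEditsName(long firstSeqId, String walName) {
    return String.format(Locale.ROOT, "%019d", firstSeqId) + "-" + walName + ".temp";
  }

  /** After the split finishes, the file is renamed to the highest sequence id it contains. */
  static String finalEditsName(long maxSeqId) {
    return String.format(Locale.ROOT, "%019d", maxSeqId);
  }

  /** A bare "<maxSeqId>.seqid" marker records the max sequence id already persisted. */
  static String seqIdMarkerName(long maxSeqId) {
    return maxSeqId + ".seqid";
  }

  public static void main(String[] args) {
    System.out.println(tempEditsName(1, "wal.1731493551722"));  // 0000000000000000001-wal.1731493551722.temp
    System.out.println(finalEditsName(3000));                   // 0000000000000003000
    System.out.println(seqIdMarkerName(1));                     // 1.seqid
  }
}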
2024-11-13T10:25:51,711 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1731493551651.2a466bdb7d2ec60c37de266733591164. 2024-11-13T10:25:51,711 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1731493551651.2a466bdb7d2ec60c37de266733591164. after waiting 0 ms 2024-11-13T10:25:51,711 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1731493551651.2a466bdb7d2ec60c37de266733591164. 2024-11-13T10:25:51,712 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1731493551651.2a466bdb7d2ec60c37de266733591164. 2024-11-13T10:25:51,712 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 2a466bdb7d2ec60c37de266733591164: Waiting for close lock at 1731493551711Disabling compacts and flushes for region at 1731493551711Disabling writes for close at 1731493551711Writing region close event to WAL at 1731493551712 (+1 ms)Closed at 1731493551712 2024-11-13T10:25:51,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741864_1041 (size=95) 2024-11-13T10:25:51,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741864_1041 (size=95) 2024-11-13T10:25:51,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741864_1041 (size=95) 2024-11-13T10:25:51,718 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-13T10:25:51,718 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-79516391:(num 1731493551656) 2024-11-13T10:25:51,719 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-13T10:25:51,721 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:25:51,739 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493551722, exclude list is [], retry=0 2024-11-13T10:25:51,742 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:25:51,743 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:25:51,743 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:25:51,745 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493551722 2024-11-13T10:25:51,746 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create 
new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:35745:35745)] 2024-11-13T10:25:51,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741866_1043 (size=263359) 2024-11-13T10:25:51,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741866_1043 (size=263359) 2024-11-13T10:25:51,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741866_1043 (size=263359) 2024-11-13T10:25:51,982 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493551722, size=257.2 K (263359bytes) 2024-11-13T10:25:51,982 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493551722 2024-11-13T10:25:51,983 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493551722 after 1ms 2024-11-13T10:25:51,989 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493551722: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:25:51,991 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493551722 took 9ms 2024-11-13T10:25:51,997 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1731493551722.temp 2024-11-13T10:25:52,004 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000000001-wal.1731493551722.temp 2024-11-13T10:25:52,075 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493551722 so closing down 2024-11-13T10:25:52,076 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-13T10:25:52,078 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-13T10:25:52,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741867_1044 (size=263359) 2024-11-13T10:25:52,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741867_1044 (size=263359) 2024-11-13T10:25:52,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741867_1044 (size=263359) 2024-11-13T10:25:52,097 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000000001-wal.1731493551722.temp (wrote 3000 edits, skipped 0 edits in 48 ms) 2024-11-13T10:25:52,102 INFO 
[split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000000001-wal.1731493551722.temp to hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000003000 2024-11-13T10:25:52,103 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 111 ms; skipped=0; WAL=hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493551722, size=257.2 K, length=263359, corrupted=false, cancelled=false 2024-11-13T10:25:52,103 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493551722, journal: Splitting hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493551722, size=257.2 K (263359bytes) at 1731493551982Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000000001-wal.1731493551722.temp at 1731493552004 (+22 ms)Split 1024 edits, skipped 0 edits. at 1731493552022 (+18 ms)Split 2048 edits, skipped 0 edits. at 1731493552049 (+27 ms)Finishing writing output for hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493551722 so closing down at 1731493552075 (+26 ms)3 split writer threads finished at 1731493552078 (+3 ms)Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000000001-wal.1731493551722.temp (wrote 3000 edits, skipped 0 edits in 48 ms) at 1731493552097 (+19 ms)Rename recovered edits hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000000001-wal.1731493551722.temp to hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000003000 at 1731493552102 (+5 ms)Processed 3000 edits across 1 Regions in 111 ms; skipped=0; WAL=hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493551722, size=257.2 K, length=263359, corrupted=false, cancelled=false at 1731493552103 (+1 ms) 2024-11-13T10:25:52,105 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493551722 to hdfs://localhost:41249/hbase/oldWALs/wal.1731493551722 2024-11-13T10:25:52,107 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000003000 2024-11-13T10:25:52,107 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-13T10:25:52,111 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:25:52,134 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493552112, exclude list is [], retry=0 2024-11-13T10:25:52,138 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in 
unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:25:52,139 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:25:52,139 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:25:52,148 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493552112 2024-11-13T10:25:52,148 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:42913:42913)] 2024-11-13T10:25:52,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741868_1045 (size=263486) 2024-11-13T10:25:52,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741868_1045 (size=263486) 2024-11-13T10:25:52,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741868_1045 (size=263486) 2024-11-13T10:25:52,417 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493552112, size=257.3 K (263486bytes) 2024-11-13T10:25:52,417 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493552112 2024-11-13T10:25:52,417 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493552112 after 0ms 2024-11-13T10:25:52,421 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493552112: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:25:52,426 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493552112 took 9ms 2024-11-13T10:25:52,451 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000003001-wal.1731493552112.temp 2024-11-13T10:25:52,464 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000003001-wal.1731493552112.temp 2024-11-13T10:25:52,569 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493552112 so closing down 2024-11-13T10:25:52,569 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-13T10:25:52,569 INFO 
[Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-13T10:25:52,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741869_1046 (size=263486) 2024-11-13T10:25:52,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741869_1046 (size=263486) 2024-11-13T10:25:52,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741869_1046 (size=263486) 2024-11-13T10:25:52,979 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000003001-wal.1731493552112.temp (wrote 3000 edits, skipped 0 edits in 49 ms) 2024-11-13T10:25:52,980 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000003001-wal.1731493552112.temp to hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000006000 2024-11-13T10:25:52,981 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 540 ms; skipped=0; WAL=hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493552112, size=257.3 K, length=263486, corrupted=false, cancelled=false 2024-11-13T10:25:52,981 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493552112, journal: Splitting hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493552112, size=257.3 K (263486bytes) at 1731493552417Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000003001-wal.1731493552112.temp at 1731493552464 (+47 ms)Split 1024 edits, skipped 0 edits. at 1731493552483 (+19 ms)Split 2048 edits, skipped 0 edits. 
at 1731493552541 (+58 ms)Finishing writing output for hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493552112 so closing down at 1731493552569 (+28 ms)3 split writer threads finished at 1731493552569Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000003001-wal.1731493552112.temp (wrote 3000 edits, skipped 0 edits in 49 ms) at 1731493552979 (+410 ms)Rename recovered edits hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000003001-wal.1731493552112.temp to hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000006000 at 1731493552981 (+2 ms)Processed 3000 edits across 1 Regions in 540 ms; skipped=0; WAL=hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493552112, size=257.3 K, length=263486, corrupted=false, cancelled=false at 1731493552981 2024-11-13T10:25:52,983 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493552112 to hdfs://localhost:41249/hbase/oldWALs/wal.1731493552112 2024-11-13T10:25:52,984 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000006000 2024-11-13T10:25:52,984 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-13T10:25:52,986 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/test2727-manual,16010,1731493551649, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:25:53,002 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493552986, exclude list is [], retry=0 2024-11-13T10:25:53,005 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:25:53,005 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:25:53,006 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:25:53,008 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1731493551649/wal.1731493552986 2024-11-13T10:25:53,009 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:35745:35745)] 2024-11-13T10:25:53,009 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 2a466bdb7d2ec60c37de266733591164, NAME => 
'test2727,,1731493551651.2a466bdb7d2ec60c37de266733591164.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:25:53,009 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1731493551651.2a466bdb7d2ec60c37de266733591164.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:25:53,009 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 2a466bdb7d2ec60c37de266733591164 2024-11-13T10:25:53,009 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 2a466bdb7d2ec60c37de266733591164 2024-11-13T10:25:53,011 INFO [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 2a466bdb7d2ec60c37de266733591164 2024-11-13T10:25:53,012 INFO [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2a466bdb7d2ec60c37de266733591164 columnFamilyName a 2024-11-13T10:25:53,012 DEBUG [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:53,013 INFO [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] regionserver.HStore(327): Store=2a466bdb7d2ec60c37de266733591164/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:53,013 INFO [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 2a466bdb7d2ec60c37de266733591164 2024-11-13T10:25:53,014 INFO [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2a466bdb7d2ec60c37de266733591164 columnFamilyName b 2024-11-13T10:25:53,014 DEBUG 
[StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:53,014 INFO [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] regionserver.HStore(327): Store=2a466bdb7d2ec60c37de266733591164/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:53,015 INFO [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 2a466bdb7d2ec60c37de266733591164 2024-11-13T10:25:53,015 INFO [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2a466bdb7d2ec60c37de266733591164 columnFamilyName c 2024-11-13T10:25:53,015 DEBUG [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:53,016 INFO [StoreOpener-2a466bdb7d2ec60c37de266733591164-1 {}] regionserver.HStore(327): Store=2a466bdb7d2ec60c37de266733591164/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:53,016 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 2a466bdb7d2ec60c37de266733591164 2024-11-13T10:25:53,017 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164 2024-11-13T10:25:53,019 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 2 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164 2024-11-13T10:25:53,020 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000003000 2024-11-13T10:25:53,023 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000003000: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:25:53,077 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=3000, 
path=hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000003000 2024-11-13T10:25:53,078 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000006000 2024-11-13T10:25:53,082 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000006000: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:25:53,150 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=3001, maxSequenceIdInLog=6000, path=hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000006000 2024-11-13T10:25:53,150 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 2a466bdb7d2ec60c37de266733591164 3/3 column families, dataSize=215.51 KB heapSize=657 KB 2024-11-13T10:25:53,186 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/.tmp/a/0bc1878a83e84b38a6f28f5905da5467 is 41, key is test2727/a:100/1731493552156/Put/seqid=0 2024-11-13T10:25:53,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741871_1048 (size=84227) 2024-11-13T10:25:53,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741871_1048 (size=84227) 2024-11-13T10:25:53,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741871_1048 (size=84227) 2024-11-13T10:25:53,211 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/.tmp/a/0bc1878a83e84b38a6f28f5905da5467 2024-11-13T10:25:53,249 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/.tmp/b/6291a261c1184531871b4d80031b899c is 41, key is test2727/b:100/1731493552240/Put/seqid=0 2024-11-13T10:25:53,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741872_1049 (size=84609) 2024-11-13T10:25:53,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741872_1049 (size=84609) 2024-11-13T10:25:53,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741872_1049 (size=84609) 2024-11-13T10:25:53,264 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/.tmp/b/6291a261c1184531871b4d80031b899c 2024-11-13T10:25:53,313 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/.tmp/c/daeaccaac8b94a76b8a97ddb59cb95bf is 41, key is test2727/c:100/1731493552318/Put/seqid=0 2024-11-13T10:25:53,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741873_1050 (size=84609) 2024-11-13T10:25:53,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741873_1050 (size=84609) 2024-11-13T10:25:53,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741873_1050 (size=84609) 2024-11-13T10:25:53,343 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/.tmp/c/daeaccaac8b94a76b8a97ddb59cb95bf 2024-11-13T10:25:53,353 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/.tmp/a/0bc1878a83e84b38a6f28f5905da5467 as hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/a/0bc1878a83e84b38a6f28f5905da5467 2024-11-13T10:25:53,362 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/a/0bc1878a83e84b38a6f28f5905da5467, entries=2000, sequenceid=6000, filesize=82.3 K 2024-11-13T10:25:53,365 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/.tmp/b/6291a261c1184531871b4d80031b899c as hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/b/6291a261c1184531871b4d80031b899c 2024-11-13T10:25:53,374 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/b/6291a261c1184531871b4d80031b899c, entries=2000, sequenceid=6000, filesize=82.6 K 2024-11-13T10:25:53,376 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/.tmp/c/daeaccaac8b94a76b8a97ddb59cb95bf as hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/c/daeaccaac8b94a76b8a97ddb59cb95bf 2024-11-13T10:25:53,382 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/c/daeaccaac8b94a76b8a97ddb59cb95bf, entries=2000, sequenceid=6000, filesize=82.6 K 2024-11-13T10:25:53,383 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for 2a466bdb7d2ec60c37de266733591164 in 232ms, sequenceid=6000, compaction requested=false; wal=null 2024-11-13T10:25:53,383 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000003000 2024-11-13T10:25:53,384 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits 
file=hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/0000000000000006000 2024-11-13T10:25:53,385 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 2a466bdb7d2ec60c37de266733591164 2024-11-13T10:25:53,385 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 2a466bdb7d2ec60c37de266733591164 2024-11-13T10:25:53,386 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-13T10:25:53,388 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 2a466bdb7d2ec60c37de266733591164 2024-11-13T10:25:53,391 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/test2727/2a466bdb7d2ec60c37de266733591164/recovered.edits/6000.seqid, newMaxSeqId=6000, maxSeqId=1 2024-11-13T10:25:53,392 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 2a466bdb7d2ec60c37de266733591164; next sequenceid=6001; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68770436, jitterRate=0.024759352207183838}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-13T10:25:53,393 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 2a466bdb7d2ec60c37de266733591164: Writing region info on filesystem at 1731493553009Initializing all the Stores at 1731493553011 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493553011Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493553011Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493553011Obtaining lock to block concurrent updates at 1731493553150 (+139 ms)Preparing flush snapshotting stores in 2a466bdb7d2ec60c37de266733591164 at 1731493553150Finished memstore snapshotting test2727,,1731493551651.2a466bdb7d2ec60c37de266733591164., syncing WAL and waiting on mvcc, flushsize=dataSize=220680, getHeapSize=672720, getOffHeapSize=0, getCellsCount=6000 at 1731493553150Flushing stores of test2727,,1731493551651.2a466bdb7d2ec60c37de266733591164. 
at 1731493553150Flushing 2a466bdb7d2ec60c37de266733591164/a: creating writer at 1731493553151 (+1 ms)Flushing 2a466bdb7d2ec60c37de266733591164/a: appending metadata at 1731493553185 (+34 ms)Flushing 2a466bdb7d2ec60c37de266733591164/a: closing flushed file at 1731493553185Flushing 2a466bdb7d2ec60c37de266733591164/b: creating writer at 1731493553220 (+35 ms)Flushing 2a466bdb7d2ec60c37de266733591164/b: appending metadata at 1731493553246 (+26 ms)Flushing 2a466bdb7d2ec60c37de266733591164/b: closing flushed file at 1731493553246Flushing 2a466bdb7d2ec60c37de266733591164/c: creating writer at 1731493553275 (+29 ms)Flushing 2a466bdb7d2ec60c37de266733591164/c: appending metadata at 1731493553312 (+37 ms)Flushing 2a466bdb7d2ec60c37de266733591164/c: closing flushed file at 1731493553312Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a671519: reopening flushed file at 1731493553352 (+40 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@71a4f46f: reopening flushed file at 1731493553363 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@363a231f: reopening flushed file at 1731493553375 (+12 ms)Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for 2a466bdb7d2ec60c37de266733591164 in 232ms, sequenceid=6000, compaction requested=false; wal=null at 1731493553383 (+8 ms)Cleaning up temporary data from old regions at 1731493553385 (+2 ms)Region opened successfully at 1731493553393 (+8 ms) 2024-11-13T10:25:53,394 DEBUG [Time-limited test {}] wal.AbstractTestWALReplay(320): region.getOpenSeqNum(): 6001, wal3.id: 0 2024-11-13T10:25:53,394 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 2a466bdb7d2ec60c37de266733591164, disabling compactions & flushes 2024-11-13T10:25:53,394 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1731493551651.2a466bdb7d2ec60c37de266733591164. 2024-11-13T10:25:53,394 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1731493551651.2a466bdb7d2ec60c37de266733591164. 2024-11-13T10:25:53,394 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1731493551651.2a466bdb7d2ec60c37de266733591164. after waiting 0 ms 2024-11-13T10:25:53,394 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1731493551651.2a466bdb7d2ec60c37de266733591164. 2024-11-13T10:25:53,396 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1731493551651.2a466bdb7d2ec60c37de266733591164. 
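At this point the main body of test2727 has completed: two WALs of 3,000 edits each were split into recovered.edits/0000000000000003000 and 0000000000000006000, both were replayed on reopen (firstSequenceIdInLog=1/3001, maxSequenceIdInLog=3000/6000), the 6,000 cells were flushed into one HFile per column family, and the region reopened at next sequenceid=6001. The split reader reports hasValueCompression=true, valueCompressionType=GZ, which is the point of this value-compression variant of the WAL replay test. The sketch below only illustrates a GZ round trip for a cell value using plain JDK streams, as an assumption-level illustration of the codec named in the log; it is not HBase's WAL value-compression code path.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

/** Illustrative GZ compress/decompress round trip for a value payload; unrelated to HBase internals. */
public class GzValueRoundTrip {
  static byte[] compress(byte[] value) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (GZIPOutputStream gz = new GZIPOutputStream(bos)) {
      gz.write(value);
    }
    return bos.toByteArray();
  }

  static byte[] decompress(byte[] compressed) throws IOException {
    try (GZIPInputStream gz = new GZIPInputStream(new ByteArrayInputStream(compressed))) {
      return gz.readAllBytes();
    }
  }

  public static void main(String[] args) throws IOException {
    byte[] value = "test2727 value payload".getBytes(StandardCharsets.UTF_8);
    byte[] restored = decompress(compress(value));
    // Prints true: the value survives the GZ round trip unchanged.
    System.out.println(new String(restored, StandardCharsets.UTF_8).equals("test2727 value payload"));
  }
}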
2024-11-13T10:25:53,396 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 2a466bdb7d2ec60c37de266733591164: Waiting for close lock at 1731493553394Disabling compacts and flushes for region at 1731493553394Disabling writes for close at 1731493553394Writing region close event to WAL at 1731493553396 (+2 ms)Closed at 1731493553396 2024-11-13T10:25:53,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741870_1047 (size=95) 2024-11-13T10:25:53,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741870_1047 (size=95) 2024-11-13T10:25:53,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741870_1047 (size=95) 2024-11-13T10:25:53,407 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-13T10:25:53,407 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1731493552986) 2024-11-13T10:25:53,422 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#test2727 Thread=393 (was 390) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:56622 [Waiting for operation #12] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:56594 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:47806 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:45946 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=899 (was 835) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=629 (was 629), ProcessCount=11 (was 11), AvailableMemoryMB=579 (was 836) 2024-11-13T10:25:53,450 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testSequentialEditLogSeqNum Thread=393, OpenFileDescriptor=899, MaxFileDescriptor=1048576, SystemLoadAverage=629, ProcessCount=11, AvailableMemoryMB=578 2024-11-13T10:25:53,476 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:25:53,485 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731493553475, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:25:53,486 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor wal.1731493553486 2024-11-13T10:25:53,507 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731493553475/wal.1731493553486 2024-11-13T10:25:53,516 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new MockWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:42913:42913)] 2024-11-13T10:25:53,530 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 424a9b88c3cd18b9653b7b8f73289859, NAME => 'testSequentialEditLogSeqNum,,1731493553477.424a9b88c3cd18b9653b7b8f73289859.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:25:53,530 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testSequentialEditLogSeqNum,,1731493553477.424a9b88c3cd18b9653b7b8f73289859.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:25:53,530 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 424a9b88c3cd18b9653b7b8f73289859 2024-11-13T10:25:53,530 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 424a9b88c3cd18b9653b7b8f73289859 2024-11-13T10:25:53,532 WARN [Time-limited test {}] regionserver.HRegionFileSystem(836): hdfs://localhost:41249/hbase/data/default/testSequentialEditLogSeqNum/424a9b88c3cd18b9653b7b8f73289859 doesn't exist for region: 424a9b88c3cd18b9653b7b8f73289859 on table testSequentialEditLogSeqNum 2024-11-13T10:25:53,533 WARN [Time-limited test {}] regionserver.HRegionFileSystem(854): .regioninfo file not found for region: 424a9b88c3cd18b9653b7b8f73289859 on table testSequentialEditLogSeqNum 2024-11-13T10:25:53,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741875_1052 (size=62) 2024-11-13T10:25:53,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741875_1052 (size=62) 2024-11-13T10:25:53,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741875_1052 (size=62) 2024-11-13T10:25:53,570 INFO [StoreOpener-424a9b88c3cd18b9653b7b8f73289859-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family a of region 424a9b88c3cd18b9653b7b8f73289859 2024-11-13T10:25:53,573 INFO [StoreOpener-424a9b88c3cd18b9653b7b8f73289859-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 424a9b88c3cd18b9653b7b8f73289859 columnFamilyName a 2024-11-13T10:25:53,573 DEBUG [StoreOpener-424a9b88c3cd18b9653b7b8f73289859-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:53,574 INFO [StoreOpener-424a9b88c3cd18b9653b7b8f73289859-1 {}] regionserver.HStore(327): Store=424a9b88c3cd18b9653b7b8f73289859/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:53,574 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 424a9b88c3cd18b9653b7b8f73289859 2024-11-13T10:25:53,574 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testSequentialEditLogSeqNum/424a9b88c3cd18b9653b7b8f73289859 2024-11-13T10:25:53,575 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testSequentialEditLogSeqNum/424a9b88c3cd18b9653b7b8f73289859 2024-11-13T10:25:53,575 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 424a9b88c3cd18b9653b7b8f73289859 2024-11-13T10:25:53,576 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 424a9b88c3cd18b9653b7b8f73289859 2024-11-13T10:25:53,578 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 424a9b88c3cd18b9653b7b8f73289859 2024-11-13T10:25:53,581 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/testSequentialEditLogSeqNum/424a9b88c3cd18b9653b7b8f73289859/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T10:25:53,581 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 424a9b88c3cd18b9653b7b8f73289859; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70329324, jitterRate=0.04798859357833862}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T10:25:53,582 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 424a9b88c3cd18b9653b7b8f73289859: Writing region info on filesystem at 1731493553531Initializing all the Stores at 1731493553558 (+27 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493553558Cleaning up temporary data from old regions at 1731493553576 (+18 ms)Region opened successfully at 1731493553582 (+6 ms) 2024-11-13T10:25:53,659 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 424a9b88c3cd18b9653b7b8f73289859 1/1 column families, dataSize=770 B heapSize=1.73 KB 2024-11-13T10:25:53,694 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testSequentialEditLogSeqNum/424a9b88c3cd18b9653b7b8f73289859/.tmp/a/9b1f96a00b2d4a62a43a51e1ad3977da is 81, key is testSequentialEditLogSeqNum/a:x0/1731493553582/Put/seqid=0 2024-11-13T10:25:53,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741876_1053 (size=5833) 2024-11-13T10:25:53,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741876_1053 (size=5833) 2024-11-13T10:25:53,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741876_1053 (size=5833) 2024-11-13T10:25:53,716 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=770 B at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testSequentialEditLogSeqNum/424a9b88c3cd18b9653b7b8f73289859/.tmp/a/9b1f96a00b2d4a62a43a51e1ad3977da 2024-11-13T10:25:53,724 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testSequentialEditLogSeqNum/424a9b88c3cd18b9653b7b8f73289859/.tmp/a/9b1f96a00b2d4a62a43a51e1ad3977da as hdfs://localhost:41249/hbase/data/default/testSequentialEditLogSeqNum/424a9b88c3cd18b9653b7b8f73289859/a/9b1f96a00b2d4a62a43a51e1ad3977da 2024-11-13T10:25:53,732 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testSequentialEditLogSeqNum/424a9b88c3cd18b9653b7b8f73289859/a/9b1f96a00b2d4a62a43a51e1ad3977da, entries=10, sequenceid=13, filesize=5.7 K 2024-11-13T10:25:53,734 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~770 B/770, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 424a9b88c3cd18b9653b7b8f73289859 in 75ms, sequenceid=13, compaction requested=false 2024-11-13T10:25:53,734 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 424a9b88c3cd18b9653b7b8f73289859: 2024-11-13T10:25:53,742 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T10:25:53,743 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T10:25:53,743 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T10:25:53,743 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T10:25:53,743 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T10:25:53,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741874_1051 (size=1844) 2024-11-13T10:25:53,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741874_1051 (size=1844) 2024-11-13T10:25:53,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741874_1051 (size=1844) 2024-11-13T10:25:53,766 INFO 
[Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:41249/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731493553475/wal.1731493553486, size=1.8 K (1844bytes) 2024-11-13T10:25:53,766 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41249/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731493553475/wal.1731493553486 2024-11-13T10:25:53,767 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41249/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731493553475/wal.1731493553486 after 0ms 2024-11-13T10:25:53,770 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731493553475/wal.1731493553486: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:25:53,772 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:41249/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731493553475/wal.1731493553486 took 7ms 2024-11-13T10:25:53,786 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:41249/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731493553475/wal.1731493553486 so closing down 2024-11-13T10:25:53,786 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-13T10:25:53,787 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1731493553486.temp 2024-11-13T10:25:53,789 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testSequentialEditLogSeqNum/424a9b88c3cd18b9653b7b8f73289859/recovered.edits/0000000000000000003-wal.1731493553486.temp 2024-11-13T10:25:53,793 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-13T10:25:53,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741877_1054 (size=1477) 2024-11-13T10:25:53,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741877_1054 (size=1477) 2024-11-13T10:25:53,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741877_1054 (size=1477) 2024-11-13T10:25:53,841 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testSequentialEditLogSeqNum/424a9b88c3cd18b9653b7b8f73289859/recovered.edits/0000000000000000003-wal.1731493553486.temp (wrote 15 edits, skipped 0 edits in 4 ms) 2024-11-13T10:25:53,845 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:41249/hbase/data/default/testSequentialEditLogSeqNum/424a9b88c3cd18b9653b7b8f73289859/recovered.edits/0000000000000000003-wal.1731493553486.temp to hdfs://localhost:41249/hbase/data/default/testSequentialEditLogSeqNum/424a9b88c3cd18b9653b7b8f73289859/recovered.edits/0000000000000000020 2024-11-13T10:25:53,845 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 17 edits across 1 Regions in 65 ms; skipped=2; 
WAL=hdfs://localhost:41249/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731493553475/wal.1731493553486, size=1.8 K, length=1844, corrupted=false, cancelled=false 2024-11-13T10:25:53,845 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:41249/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731493553475/wal.1731493553486, journal: Splitting hdfs://localhost:41249/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731493553475/wal.1731493553486, size=1.8 K (1844bytes) at 1731493553766Finishing writing output for hdfs://localhost:41249/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731493553475/wal.1731493553486 so closing down at 1731493553786 (+20 ms)Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testSequentialEditLogSeqNum/424a9b88c3cd18b9653b7b8f73289859/recovered.edits/0000000000000000003-wal.1731493553486.temp at 1731493553789 (+3 ms)3 split writer threads finished at 1731493553794 (+5 ms)Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testSequentialEditLogSeqNum/424a9b88c3cd18b9653b7b8f73289859/recovered.edits/0000000000000000003-wal.1731493553486.temp (wrote 15 edits, skipped 0 edits in 4 ms) at 1731493553841 (+47 ms)Rename recovered edits hdfs://localhost:41249/hbase/data/default/testSequentialEditLogSeqNum/424a9b88c3cd18b9653b7b8f73289859/recovered.edits/0000000000000000003-wal.1731493553486.temp to hdfs://localhost:41249/hbase/data/default/testSequentialEditLogSeqNum/424a9b88c3cd18b9653b7b8f73289859/recovered.edits/0000000000000000020 at 1731493553845 (+4 ms)Processed 17 edits across 1 Regions in 65 ms; skipped=2; WAL=hdfs://localhost:41249/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731493553475/wal.1731493553486, size=1.8 K, length=1844, corrupted=false, cancelled=false at 1731493553845 2024-11-13T10:25:53,865 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testSequentialEditLogSeqNum Thread=396 (was 393) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:56622 [Waiting for operation #14] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:56594 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:45946 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=933 (was 899) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=629 (was 629), ProcessCount=11 (was 11), AvailableMemoryMB=517 (was 578) 2024-11-13T10:25:53,881 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testRegionMadeOfBulkLoadedFilesOnly Thread=396, OpenFileDescriptor=933, MaxFileDescriptor=1048576, SystemLoadAverage=629, ProcessCount=11, AvailableMemoryMB=516 2024-11-13T10:25:53,909 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:25:53,911 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:25:53,912 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T10:25:53,917 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-94267248, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/hregion-94267248, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:25:53,934 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-94267248/hregion-94267248.1731493553918, exclude list is [], retry=0 2024-11-13T10:25:53,939 DEBUG [AsyncFSWAL-17-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:25:53,940 DEBUG [AsyncFSWAL-17-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:25:53,940 DEBUG [AsyncFSWAL-17-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:25:53,943 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-94267248/hregion-94267248.1731493553918 2024-11-13T10:25:53,944 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:42913:42913)] 2024-11-13T10:25:53,944 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 75fc0cce308e3cfae139ca8a01977942, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1731493553909.75fc0cce308e3cfae139ca8a01977942.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testRegionMadeOfBulkLoadedFilesOnly', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME 
=> 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41249/hbase 2024-11-13T10:25:53,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741879_1056 (size=70) 2024-11-13T10:25:53,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741879_1056 (size=70) 2024-11-13T10:25:53,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741879_1056 (size=70) 2024-11-13T10:25:54,399 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1731493553909.75fc0cce308e3cfae139ca8a01977942.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:25:54,404 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:54,406 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 75fc0cce308e3cfae139ca8a01977942 columnFamilyName a 2024-11-13T10:25:54,406 DEBUG [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:54,407 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] regionserver.HStore(327): Store=75fc0cce308e3cfae139ca8a01977942/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:54,407 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:54,409 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 75fc0cce308e3cfae139ca8a01977942 columnFamilyName b 2024-11-13T10:25:54,409 DEBUG [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:54,409 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] regionserver.HStore(327): Store=75fc0cce308e3cfae139ca8a01977942/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:54,409 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:54,411 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 75fc0cce308e3cfae139ca8a01977942 columnFamilyName c 2024-11-13T10:25:54,411 DEBUG [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:54,411 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] regionserver.HStore(327): Store=75fc0cce308e3cfae139ca8a01977942/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:54,412 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:54,412 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:54,413 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:54,414 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:54,414 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 75fc0cce308e3cfae139ca8a01977942 
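Note: the repeated CompactionConfiguration entries above carry the per-family selection parameters (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2). Purely as a rough illustration, and not the actual ExploringCompactionPolicy code, the classic ratio heuristic admits a file into a minor compaction when its size is at most `ratio` times the combined size of the other candidates; the sketch below (the `select` helper is made up) assumes a plain list of file sizes and ignores off-peak ratios, throttling and windowing.

```java
import java.util.ArrayList;
import java.util.List;

// Simplified ratio-based selection sketch; the real ExploringCompactionPolicy
// explores many candidate subsets and applies more constraints than this.
public class RatioSelectionSketch {
    static List<Long> select(List<Long> sizes, double ratio, int minFiles, int maxFiles) {
        List<Long> picked = new ArrayList<>();
        long sum = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            long others = sum - size;
            // classic ratio test: keep the file only if it is not "too large"
            // relative to the rest of the candidate set
            if (size <= ratio * others && picked.size() < maxFiles) {
                picked.add(size);
            }
        }
        return picked.size() >= minFiles ? picked : List.of();
    }

    public static void main(String[] args) {
        // parameters loosely modelled on the log: ratio 1.2, 3..10 files
        System.out.println(select(List.of(5_000L, 6_000L, 7_000L, 500_000L), 1.2, 3, 10));
        // -> [5000, 6000, 7000]; the 500 KB file fails the ratio test
    }
}
```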
2024-11-13T10:25:54,415 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-13T10:25:54,416 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:54,421 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T10:25:54,421 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 75fc0cce308e3cfae139ca8a01977942; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70407262, jitterRate=0.04914996027946472}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-13T10:25:54,423 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 75fc0cce308e3cfae139ca8a01977942: Writing region info on filesystem at 1731493554399Initializing all the Stores at 1731493554400 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493554400Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493554404 (+4 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493554404Cleaning up temporary data from old regions at 1731493554414 (+10 ms)Region opened successfully at 1731493554423 (+9 ms) 2024-11-13T10:25:54,423 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 75fc0cce308e3cfae139ca8a01977942, disabling compactions & flushes 2024-11-13T10:25:54,423 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1731493553909.75fc0cce308e3cfae139ca8a01977942. 2024-11-13T10:25:54,423 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1731493553909.75fc0cce308e3cfae139ca8a01977942. 2024-11-13T10:25:54,423 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1731493553909.75fc0cce308e3cfae139ca8a01977942. after waiting 0 ms 2024-11-13T10:25:54,423 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1731493553909.75fc0cce308e3cfae139ca8a01977942. 
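Note: the "1.seqid" marker written above and the split output seen earlier ("0000000000000000003-wal.1731493553486.temp", later renamed to "0000000000000000020") both encode sequence ids in the file name; the recovered-edits names are zero-padded to 19 digits. The sketch below only reproduces that naming; `formatRecoveredEditsName` is a hypothetical helper, not an HBase API.

```java
// Illustrative only: how the zero-padded recovered-edits names seen in this log
// can be produced. Not taken from the HBase source.
public class SeqIdNameSketch {
    static String formatRecoveredEditsName(long seqId, String walName) {
        // 19 digits matches the width observed in the split output above
        return String.format("%019d-%s.temp", seqId, walName);
    }

    public static void main(String[] args) {
        System.out.println(formatRecoveredEditsName(3, "wal.1731493553486"));
        // -> 0000000000000000003-wal.1731493553486.temp
        System.out.println(String.format("%019d", 20));
        // -> 0000000000000000020, the final name after the rename
    }
}
```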
2024-11-13T10:25:54,426 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1731493553909.75fc0cce308e3cfae139ca8a01977942. 2024-11-13T10:25:54,426 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 75fc0cce308e3cfae139ca8a01977942: Waiting for close lock at 1731493554423Disabling compacts and flushes for region at 1731493554423Disabling writes for close at 1731493554423Writing region close event to WAL at 1731493554426 (+3 ms)Closed at 1731493554426 2024-11-13T10:25:54,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741878_1055 (size=95) 2024-11-13T10:25:54,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741878_1055 (size=95) 2024-11-13T10:25:54,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741878_1055 (size=95) 2024-11-13T10:25:54,439 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-13T10:25:54,439 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-94267248:(num 1731493553918) 2024-11-13T10:25:54,439 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-13T10:25:54,441 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:25:54,458 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493554442, exclude list is [], retry=0 2024-11-13T10:25:54,462 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:25:54,463 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:25:54,463 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:25:54,469 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493554442 2024-11-13T10:25:54,472 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:45409:45409)] 2024-11-13T10:25:54,473 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 75fc0cce308e3cfae139ca8a01977942, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1731493553909.75fc0cce308e3cfae139ca8a01977942.', STARTKEY => '', ENDKEY => ''} 
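Note: the WAL configuration line above reports blocksize=256 MB and rollsize=128 MB; the roll size is derived from the WAL block size times a roll multiplier. The arithmetic sketch below assumes the usual 0.5 multiplier (the property name `hbase.regionserver.logroll.multiplier` is quoted from memory and should be checked against the version in use; neither value appears in this log).

```java
// Rough arithmetic behind the "blocksize=256 MB, rollsize=128 MB" line above.
// The multiplier value and property name are assumptions, not read from this log.
public class WalRollSizeSketch {
    public static void main(String[] args) {
        long blockSize = 256L * 1024 * 1024;      // 256 MB, as logged
        double rollMultiplier = 0.5;              // assumed default for
                                                  // hbase.regionserver.logroll.multiplier
        long rollSize = (long) (blockSize * rollMultiplier);
        System.out.printf("blocksize=%d MB, rollsize=%d MB%n",
                blockSize >> 20, rollSize >> 20); // -> blocksize=256 MB, rollsize=128 MB
    }
}
```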
2024-11-13T10:25:54,473 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1731493553909.75fc0cce308e3cfae139ca8a01977942.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:25:54,473 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:54,473 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:54,475 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:54,476 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 75fc0cce308e3cfae139ca8a01977942 columnFamilyName a 2024-11-13T10:25:54,477 DEBUG [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:54,477 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] regionserver.HStore(327): Store=75fc0cce308e3cfae139ca8a01977942/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:54,477 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:54,478 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 75fc0cce308e3cfae139ca8a01977942 columnFamilyName b 2024-11-13T10:25:54,478 DEBUG [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:54,479 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] regionserver.HStore(327): Store=75fc0cce308e3cfae139ca8a01977942/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:54,479 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:54,480 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 75fc0cce308e3cfae139ca8a01977942 columnFamilyName c 2024-11-13T10:25:54,480 DEBUG [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:54,481 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] regionserver.HStore(327): Store=75fc0cce308e3cfae139ca8a01977942/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:54,481 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:54,482 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:54,483 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:54,485 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:54,485 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:54,485 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
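Note: the FlushLargeStoresPolicy line above falls back to the region memstore flush size divided by the number of column families because no per-family lower bound is configured. With the three families (a, b, c) of this table and a 128 MB flush size, that works out to the 44,739,242-byte flushSizeLowerBound reported in the surrounding entries; a quick check:

```java
// Reproduces the "42.7 M" / flushSizeLowerBound=44739242 figures in the log:
// memstore flush size divided by the number of column families.
public class FlushLowerBoundSketch {
    public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024; // 134217728 bytes (128 MB)
        int columnFamilies = 3;                      // families a, b and c in this test
        long lowerBound = memstoreFlushSize / columnFamilies;
        System.out.println(lowerBound);                        // 44739242
        System.out.printf("%.1f M%n", lowerBound / 1048576.0); // 42.7 M
    }
}
```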
2024-11-13T10:25:54,487 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:54,488 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 75fc0cce308e3cfae139ca8a01977942; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63552302, jitterRate=-0.05299690365791321}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-13T10:25:54,489 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 75fc0cce308e3cfae139ca8a01977942: Writing region info on filesystem at 1731493554474Initializing all the Stores at 1731493554475 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493554475Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493554475Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493554475Cleaning up temporary data from old regions at 1731493554485 (+10 ms)Region opened successfully at 1731493554489 (+4 ms) 2024-11-13T10:25:54,494 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile is 28, key is \x0D/a:a/1731493554493/Put/seqid=0 2024-11-13T10:25:54,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741881_1058 (size=4826) 2024-11-13T10:25:54,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741881_1058 (size=4826) 2024-11-13T10:25:54,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741881_1058 (size=4826) 2024-11-13T10:25:54,547 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:41249/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile for inclusion in 75fc0cce308e3cfae139ca8a01977942/a 2024-11-13T10:25:54,564 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first= last=z 2024-11-13T10:25:54,564 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-13T10:25:54,564 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 75fc0cce308e3cfae139ca8a01977942: 2024-11-13T10:25:54,566 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile as 
hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/a/b5550219a5fd4c07a8a9666c23d29208_SeqId_3_ 2024-11-13T10:25:54,568 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:41249/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into 75fc0cce308e3cfae139ca8a01977942/a as hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/a/b5550219a5fd4c07a8a9666c23d29208_SeqId_3_ - updating store file list. 2024-11-13T10:25:54,575 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for b5550219a5fd4c07a8a9666c23d29208_SeqId_3_: NONE, but ROW specified in column family configuration 2024-11-13T10:25:54,576 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/a/b5550219a5fd4c07a8a9666c23d29208_SeqId_3_ into 75fc0cce308e3cfae139ca8a01977942/a 2024-11-13T10:25:54,576 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:41249/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into 75fc0cce308e3cfae139ca8a01977942/a (new location: hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/a/b5550219a5fd4c07a8a9666c23d29208_SeqId_3_) 2024-11-13T10:25:54,624 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:41249/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493554442, size=0 (0bytes) 2024-11-13T10:25:54,624 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:41249/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493554442 might be still open, length is 0 2024-11-13T10:25:54,624 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41249/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493554442 2024-11-13T10:25:54,625 WARN [IPC Server handler 0 on default port 41249 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493554442 has not been closed. Lease recovery is in progress. RecoveryId = 1059 for block blk_1073741880_1057 2024-11-13T10:25:54,625 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41249/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493554442 after 1ms 2024-11-13T10:25:57,258 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:46050 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:45097:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46050 dst: /127.0.0.1:45097 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:45097 remote=/127.0.0.1:46050]. Total timeout mills is 60000, 57325 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:25:57,258 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:56804 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:44787:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56804 dst: /127.0.0.1:44787 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
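Annotation: earlier in this sequence (before the WAL split began) the store validated the standalone HFile and committed it into family 'a' as b5550219a5fd4c07a8a9666c23d29208_SeqId_3_; the test drives HRegion's bulk-load path directly. Against a live cluster the same effect is normally reached through the public bulk-load client available in recent HBase releases. The sketch below is only an illustration of that route: the staging directory layout (one subdirectory per column family) and the paths are assumptions, not what this test actually does.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

public class BulkLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Staging directory laid out as <dir>/<family>/<hfile>; here family 'a' would
    // hold the HFile that the log shows being validated and committed.
    Path staging = new Path("hdfs://localhost:41249/staging/testRegionMadeOfBulkLoadedFilesOnly");
    // Moves each HFile into the matching store (renaming it with a fresh bulk-load
    // sequence id, the _SeqId_N_ suffix seen above) and registers it with the store.
    BulkLoadHFiles.create(conf)
        .bulkLoad(TableName.valueOf("testRegionMadeOfBulkLoadedFilesOnly"), staging);
  }
}
```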
2024-11-13T10:25:57,258 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:47872 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:38649:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47872 dst: /127.0.0.1:38649 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:25:57,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741880_1059 (size=475) 2024-11-13T10:25:57,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741880_1059 (size=475) 2024-11-13T10:25:58,626 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41249/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493554442 after 4002ms 2024-11-13T10:25:58,629 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493554442: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:25:58,629 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:41249/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493554442 took 4005ms 2024-11-13T10:25:58,631 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:41249/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493554442; continuing. 
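Annotation: the WALSplitter records above show the pattern RecoverLeaseFSUtils follows. The WAL reports length 0 because its writer never closed it; attempt 0 fails while the NameNode starts block recovery (tearing down the datanode write pipeline, hence the DataXceiver interrupt/premature-EOF errors), and attempt 1 succeeds roughly four seconds later, after which the splitter can open the file. A minimal sketch of the same retry loop against the plain HDFS client API, with a hypothetical backoff value:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  /** Ask the NameNode to recover the lease on a possibly still-open file until it closes. */
  static boolean recoverLease(DistributedFileSystem dfs, Path file, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      // true once the file is closed and its last block finalized;
      // false means block recovery is still running on the datanodes.
      if (dfs.recoverLease(file)) {
        return true;
      }
      Thread.sleep(4000L); // hypothetical pause between attempts
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Path wal = new Path(args[0]); // e.g. an abandoned WAL under /hbase/WALs
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(wal.toUri(), new Configuration());
    System.out.println("recovered=" + recoverLease(dfs, wal, 60_000L));
  }
}
```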
2024-11-13T10:25:58,631 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:41249/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493554442 so closing down 2024-11-13T10:25:58,631 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-13T10:25:58,633 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000005-wal.1731493554442.temp 2024-11-13T10:25:58,635 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/recovered.edits/0000000000000000005-wal.1731493554442.temp 2024-11-13T10:25:58,635 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-13T10:25:58,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741882_1060 (size=259) 2024-11-13T10:25:58,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741882_1060 (size=259) 2024-11-13T10:25:58,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741882_1060 (size=259) 2024-11-13T10:25:58,656 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/recovered.edits/0000000000000000005-wal.1731493554442.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-11-13T10:25:58,658 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/recovered.edits/0000000000000000005-wal.1731493554442.temp to hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/recovered.edits/0000000000000000005 2024-11-13T10:25:58,658 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 28 ms; skipped=1; WAL=hdfs://localhost:41249/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493554442, size=0, length=0, corrupted=false, cancelled=false 2024-11-13T10:25:58,659 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:41249/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493554442, journal: Splitting hdfs://localhost:41249/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493554442, size=0 (0bytes) at 1731493554624Finishing writing output for hdfs://localhost:41249/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493554442 so closing down at 1731493558631 (+4007 ms)Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/recovered.edits/0000000000000000005-wal.1731493554442.temp at 1731493558635 (+4 ms)3 split writer threads finished at 1731493558635Closed recovered edits writer 
path=hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/recovered.edits/0000000000000000005-wal.1731493554442.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1731493558656 (+21 ms)Rename recovered edits hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/recovered.edits/0000000000000000005-wal.1731493554442.temp to hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/recovered.edits/0000000000000000005 at 1731493558658 (+2 ms)Processed 2 edits across 1 Regions in 28 ms; skipped=1; WAL=hdfs://localhost:41249/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493554442, size=0, length=0, corrupted=false, cancelled=false at 1731493558658 2024-11-13T10:25:58,660 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:41249/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493554442 to hdfs://localhost:41249/hbase/oldWALs/wal.1731493554442 2024-11-13T10:25:58,661 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/recovered.edits/0000000000000000005 2024-11-13T10:25:58,662 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-13T10:25:58,664 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:25:58,677 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493558664, exclude list is [], retry=0 2024-11-13T10:25:58,680 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:25:58,680 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:25:58,681 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:25:58,682 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493558664 2024-11-13T10:25:58,683 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:45409:45409)] 2024-11-13T10:25:58,683 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 75fc0cce308e3cfae139ca8a01977942, NAME => 
'testRegionMadeOfBulkLoadedFilesOnly,,1731493553909.75fc0cce308e3cfae139ca8a01977942.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:25:58,683 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1731493553909.75fc0cce308e3cfae139ca8a01977942.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:25:58,683 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:58,683 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:58,687 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:58,688 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 75fc0cce308e3cfae139ca8a01977942 columnFamilyName a 2024-11-13T10:25:58,688 DEBUG [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:58,696 DEBUG [StoreFileOpener-75fc0cce308e3cfae139ca8a01977942-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for b5550219a5fd4c07a8a9666c23d29208_SeqId_3_: NONE, but ROW specified in column family configuration 2024-11-13T10:25:58,696 DEBUG [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/a/b5550219a5fd4c07a8a9666c23d29208_SeqId_3_ 2024-11-13T10:25:58,696 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] regionserver.HStore(327): Store=75fc0cce308e3cfae139ca8a01977942/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:58,697 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:58,698 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 75fc0cce308e3cfae139ca8a01977942 columnFamilyName b 2024-11-13T10:25:58,698 DEBUG [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:58,698 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] regionserver.HStore(327): Store=75fc0cce308e3cfae139ca8a01977942/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:58,699 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:58,699 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 75fc0cce308e3cfae139ca8a01977942 columnFamilyName c 2024-11-13T10:25:58,700 DEBUG [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:58,700 INFO [StoreOpener-75fc0cce308e3cfae139ca8a01977942-1 {}] regionserver.HStore(327): Store=75fc0cce308e3cfae139ca8a01977942/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:58,700 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:58,701 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:58,703 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:58,703 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from 
hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/recovered.edits/0000000000000000005 2024-11-13T10:25:58,705 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/recovered.edits/0000000000000000005: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:25:58,706 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 1, skipped 0, firstSequenceIdInLog=5, maxSequenceIdInLog=5, path=hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/recovered.edits/0000000000000000005 2024-11-13T10:25:58,706 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 75fc0cce308e3cfae139ca8a01977942 3/3 column families, dataSize=58 B heapSize=904 B 2024-11-13T10:25:58,721 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/.tmp/a/ac5562e183bb4e419dbc54b650947dcd is 62, key is testRegionMadeOfBulkLoadedFilesOnly/a:a/1731493554581/Put/seqid=0 2024-11-13T10:25:58,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741884_1062 (size=5149) 2024-11-13T10:25:58,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741884_1062 (size=5149) 2024-11-13T10:25:58,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741884_1062 (size=5149) 2024-11-13T10:25:58,729 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/.tmp/a/ac5562e183bb4e419dbc54b650947dcd 2024-11-13T10:25:58,735 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/.tmp/a/ac5562e183bb4e419dbc54b650947dcd as hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/a/ac5562e183bb4e419dbc54b650947dcd 2024-11-13T10:25:58,741 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/a/ac5562e183bb4e419dbc54b650947dcd, entries=1, sequenceid=5, filesize=5.0 K 2024-11-13T10:25:58,742 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for 75fc0cce308e3cfae139ca8a01977942 in 36ms, sequenceid=5, compaction requested=false; wal=null 2024-11-13T10:25:58,742 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/recovered.edits/0000000000000000005 2024-11-13T10:25:58,743 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:58,744 DEBUG [Time-limited 
test {}] regionserver.HRegion(1060): Cleaning up temporary data for 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:58,744 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-13T10:25:58,746 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 75fc0cce308e3cfae139ca8a01977942 2024-11-13T10:25:58,748 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/75fc0cce308e3cfae139ca8a01977942/recovered.edits/5.seqid, newMaxSeqId=5, maxSeqId=1 2024-11-13T10:25:58,749 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 75fc0cce308e3cfae139ca8a01977942; next sequenceid=6; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75154394, jitterRate=0.11988773941993713}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-13T10:25:58,750 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 75fc0cce308e3cfae139ca8a01977942: Writing region info on filesystem at 1731493558683Initializing all the Stores at 1731493558684 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493558684Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493558686 (+2 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493558686Obtaining lock to block concurrent updates at 1731493558706 (+20 ms)Preparing flush snapshotting stores in 75fc0cce308e3cfae139ca8a01977942 at 1731493558706Finished memstore snapshotting testRegionMadeOfBulkLoadedFilesOnly,,1731493553909.75fc0cce308e3cfae139ca8a01977942., syncing WAL and waiting on mvcc, flushsize=dataSize=58, getHeapSize=856, getOffHeapSize=0, getCellsCount=1 at 1731493558706Flushing stores of testRegionMadeOfBulkLoadedFilesOnly,,1731493553909.75fc0cce308e3cfae139ca8a01977942. 
at 1731493558706Flushing 75fc0cce308e3cfae139ca8a01977942/a: creating writer at 1731493558707 (+1 ms)Flushing 75fc0cce308e3cfae139ca8a01977942/a: appending metadata at 1731493558721 (+14 ms)Flushing 75fc0cce308e3cfae139ca8a01977942/a: closing flushed file at 1731493558721Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@53cb537e: reopening flushed file at 1731493558734 (+13 ms)Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for 75fc0cce308e3cfae139ca8a01977942 in 36ms, sequenceid=5, compaction requested=false; wal=null at 1731493558742 (+8 ms)Cleaning up temporary data from old regions at 1731493558744 (+2 ms)Region opened successfully at 1731493558749 (+5 ms) 2024-11-13T10:25:58,753 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 75fc0cce308e3cfae139ca8a01977942, disabling compactions & flushes 2024-11-13T10:25:58,754 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1731493553909.75fc0cce308e3cfae139ca8a01977942. 2024-11-13T10:25:58,754 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1731493553909.75fc0cce308e3cfae139ca8a01977942. 2024-11-13T10:25:58,754 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1731493553909.75fc0cce308e3cfae139ca8a01977942. after waiting 0 ms 2024-11-13T10:25:58,754 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1731493553909.75fc0cce308e3cfae139ca8a01977942. 2024-11-13T10:25:58,755 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1731493553909.75fc0cce308e3cfae139ca8a01977942. 
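Annotation: after the split, the region reopened, replayed the single edit from recovered.edits/0000000000000000005, flushed it into a/ac5562e183bb4e419dbc54b650947dcd, deleted the recovered-edits file, and then closed. The row and column come straight from the flush record (testRegionMadeOfBulkLoadedFilesOnly/a:a). This test operates on an HRegion directly, but against a served table the equivalent check that the replayed edit survived would be a plain client read; a hedged sketch:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReplayCheckSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testRegionMadeOfBulkLoadedFilesOnly");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(tn)) {
      // The replayed WAL edit was a Put to row 'testRegionMadeOfBulkLoadedFilesOnly',
      // column a:a; it must be readable after reopen even though the memstore was lost.
      Get get = new Get(Bytes.toBytes("testRegionMadeOfBulkLoadedFilesOnly"))
          .addColumn(Bytes.toBytes("a"), Bytes.toBytes("a"));
      Result r = table.get(get);
      System.out.println("value after replay: "
          + Bytes.toString(r.getValue(Bytes.toBytes("a"), Bytes.toBytes("a"))));
    }
  }
}
```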
2024-11-13T10:25:58,755 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 75fc0cce308e3cfae139ca8a01977942: Waiting for close lock at 1731493558753Disabling compacts and flushes for region at 1731493558753Disabling writes for close at 1731493558754 (+1 ms)Writing region close event to WAL at 1731493558755 (+1 ms)Closed at 1731493558755 2024-11-13T10:25:58,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741883_1061 (size=95) 2024-11-13T10:25:58,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741883_1061 (size=95) 2024-11-13T10:25:58,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741883_1061 (size=95) 2024-11-13T10:25:58,760 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-13T10:25:58,760 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1731493558664) 2024-11-13T10:25:58,782 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testRegionMadeOfBulkLoadedFilesOnly Thread=401 (was 396) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1762819471_22 at /127.0.0.1:46076 [Waiting for operation #15] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1396088596) connection to localhost/127.0.0.1:41249 from jenkinstestRegionMadeOfBulkLoadedFilesOnly java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: AsyncFSWAL-17-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestRegionMadeOfBulkLoadedFilesOnly@localhost:41249 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=993 (was 933) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=595 (was 629), ProcessCount=11 (was 11), AvailableMemoryMB=482 (was 516) 2024-11-13T10:25:58,796 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterRegionMovedWithMultiCF Thread=401, OpenFileDescriptor=993, MaxFileDescriptor=1048576, SystemLoadAverage=595, ProcessCount=11, AvailableMemoryMB=482 2024-11-13T10:25:58,811 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:25:58,816 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T10:25:58,820 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 770665a7984d,45401,1731493540547 2024-11-13T10:25:58,823 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6bbe306c 2024-11-13T10:25:58,824 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T10:25:58,826 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55772, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T10:25:58,831 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T10:25:58,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF 2024-11-13T10:25:58,842 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T10:25:58,844 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testReplayEditsAfterRegionMovedWithMultiCF" procId is: 4 2024-11-13T10:25:58,845 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:58,846 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T10:25:58,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T10:25:58,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741885_1063 (size=694) 2024-11-13T10:25:58,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741885_1063 (size=694) 2024-11-13T10:25:58,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741885_1063 (size=694) 2024-11-13T10:25:58,861 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 70a598aa9b18017afa50633b8eb231df, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, 
regionDir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510 2024-11-13T10:25:58,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741886_1064 (size=77) 2024-11-13T10:25:58,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741886_1064 (size=77) 2024-11-13T10:25:58,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741886_1064 (size=77) 2024-11-13T10:25:58,872 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:25:58,872 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1722): Closing 70a598aa9b18017afa50633b8eb231df, disabling compactions & flushes 2024-11-13T10:25:58,872 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:25:58,872 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:25:58,872 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. after waiting 0 ms 2024-11-13T10:25:58,872 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:25:58,872 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 
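Annotation: the HMaster$4(2454) record above is the master's view of the create-table RPC: a descriptor with REGION_REPLICATION => '1' and two families, cf1 and cf2, with default attributes. Built through the client API, the request would look roughly like the sketch below; only the table name, replication count, and family names are taken from the log, everything else is default.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))
        .setRegionReplication(1)                               // REGION_REPLICATION => '1'
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf2"))
        .build();
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Submits a CreateTableProcedure on the master (pid=4 in the log) and blocks
      // until the table is created and its regions are assigned.
      admin.createTable(desc);
    }
  }
}
```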
2024-11-13T10:25:58,872 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1676): Region close journal for 70a598aa9b18017afa50633b8eb231df: Waiting for close lock at 1731493558872Disabling compacts and flushes for region at 1731493558872Disabling writes for close at 1731493558872Writing region close event to WAL at 1731493558872Closed at 1731493558872 2024-11-13T10:25:58,874 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T10:25:58,879 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.","families":{"info":[{"qualifier":"regioninfo","vlen":76,"tag":[],"timestamp":"1731493558874"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731493558874"}]},"ts":"1731493558874"} 2024-11-13T10:25:58,883 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-13T10:25:58,884 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T10:25:58,887 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731493558884"}]},"ts":"1731493558884"} 2024-11-13T10:25:58,891 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLING in hbase:meta 2024-11-13T10:25:58,891 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {770665a7984d=0} racks are {/default-rack=0} 2024-11-13T10:25:58,893 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T10:25:58,893 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T10:25:58,893 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T10:25:58,893 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T10:25:58,893 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T10:25:58,893 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T10:25:58,893 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T10:25:58,893 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T10:25:58,893 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T10:25:58,893 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T10:25:58,894 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, ASSIGN}] 2024-11-13T10:25:58,896 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, ASSIGN 2024-11-13T10:25:58,897 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, ASSIGN; state=OFFLINE, location=770665a7984d,44657,1731493541444; forceNewPlan=false, retain=false 2024-11-13T10:25:58,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T10:25:59,050 INFO [770665a7984d:45401 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-13T10:25:59,051 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=70a598aa9b18017afa50633b8eb231df, regionState=OPENING, regionLocation=770665a7984d,44657,1731493541444 2024-11-13T10:25:59,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, ASSIGN because future has completed 2024-11-13T10:25:59,055 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 70a598aa9b18017afa50633b8eb231df, server=770665a7984d,44657,1731493541444}] 2024-11-13T10:25:59,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T10:25:59,213 INFO [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 
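Annotation: the assignment bookkeeping above lives in hbase:meta. As the TransitRegionStateProcedure advances, the RegionStateStore updates the region row's info:state qualifier (OPENING here, OPEN once the region server reports back) along with the hosting server. Those columns can be inspected with an ordinary client scan; a minimal sketch, where the info:state column name is taken from the meta puts logged above and the info:server qualifier is assumed:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaStateSketch {
  public static void main(String[] args) throws Exception {
    byte[] info = Bytes.toBytes("info");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table meta = conn.getTable(TableName.META_TABLE_NAME);
         ResultScanner scanner = meta.getScanner(new Scan().addFamily(info))) {
      for (Result row : scanner) {
        byte[] state = row.getValue(info, Bytes.toBytes("state"));   // e.g. OPENING / OPEN
        byte[] server = row.getValue(info, Bytes.toBytes("server")); // hosting server, if open
        System.out.println(Bytes.toString(row.getRow())
            + " state=" + (state == null ? "-" : Bytes.toString(state))
            + " server=" + (server == null ? "-" : Bytes.toString(server)));
      }
    }
  }
}
```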
2024-11-13T10:25:59,213 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 70a598aa9b18017afa50633b8eb231df, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:25:59,213 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:25:59,213 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:25:59,214 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:25:59,214 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:25:59,215 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:25:59,217 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 70a598aa9b18017afa50633b8eb231df columnFamilyName cf1 2024-11-13T10:25:59,217 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:59,217 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(327): Store=70a598aa9b18017afa50633b8eb231df/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:59,217 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:25:59,219 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 70a598aa9b18017afa50633b8eb231df columnFamilyName cf2 2024-11-13T10:25:59,219 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:59,219 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(327): Store=70a598aa9b18017afa50633b8eb231df/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:59,219 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:25:59,220 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df 2024-11-13T10:25:59,221 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df 2024-11-13T10:25:59,221 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:25:59,221 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:25:59,222 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 
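Annotation: the FlushLargeStoresPolicy(65) record notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in this table's descriptor, so the policy falls back to the region memstore flush size divided by the number of families (64.0 M here). If an explicit lower bound were wanted, it could be supplied as a descriptor value; a sketch extending the builder shown earlier, where the 16 MB figure is arbitrary and only the property key comes from the log:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushBoundSketch {
  static TableDescriptor withFlushLowerBound() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf2"))
        // Per-family flush threshold (bytes) checked by FlushLargeStoresPolicy; when
        // absent, the policy divides the memstore flush size by the family count,
        // as the log record above describes.
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16L * 1024 * 1024))
        .build();
  }
}
```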
2024-11-13T10:25:59,223 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:25:59,226 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T10:25:59,226 INFO [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 70a598aa9b18017afa50633b8eb231df; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73408627, jitterRate=0.09387378394603729}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-11-13T10:25:59,226 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:25:59,227 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 70a598aa9b18017afa50633b8eb231df: Running coprocessor pre-open hook at 1731493559214Writing region info on filesystem at 1731493559214Initializing all the Stores at 1731493559215 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493559215Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493559215Cleaning up temporary data from old regions at 1731493559221 (+6 ms)Running coprocessor post-open hooks at 1731493559227 (+6 ms)Region opened successfully at 1731493559227 2024-11-13T10:25:59,230 INFO [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., pid=6, masterSystemTime=1731493559207 2024-11-13T10:25:59,234 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:25:59,234 INFO [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 
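The region comes online with next sequenceid=2, which is what later appears in hbase:meta as openSeqNum=2. A hedged client-side sketch for confirming where the region is hosted and with which sequence number, assuming an already open Connection; RegionLocator.getRegionLocation(row, reload) and HRegionLocation.getSeqNum() are the calls relied on here.

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class WhereIsMyRegion {
        // Look up the hosting server and location sequence number for row r1,
        // forcing a fresh meta lookup instead of using the client cache.
        static void printLocation(Connection conn, String table) throws IOException {
            try (RegionLocator locator =
                     conn.getRegionLocator(TableName.valueOf(table))) {
                HRegionLocation loc =
                    locator.getRegionLocation(Bytes.toBytes("r1"), true);
                System.out.println(loc.getServerName() + " seqNum=" + loc.getSeqNum());
            }
        }
    }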
2024-11-13T10:25:59,235 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=70a598aa9b18017afa50633b8eb231df, regionState=OPEN, openSeqNum=2, regionLocation=770665a7984d,44657,1731493541444 2024-11-13T10:25:59,239 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 70a598aa9b18017afa50633b8eb231df, server=770665a7984d,44657,1731493541444 because future has completed 2024-11-13T10:25:59,250 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T10:25:59,250 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 70a598aa9b18017afa50633b8eb231df, server=770665a7984d,44657,1731493541444 in 191 msec 2024-11-13T10:25:59,254 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T10:25:59,254 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, ASSIGN in 356 msec 2024-11-13T10:25:59,255 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T10:25:59,256 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731493559255"}]},"ts":"1731493559255"} 2024-11-13T10:25:59,259 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLED in hbase:meta 2024-11-13T10:25:59,260 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T10:25:59,267 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF in 427 msec 2024-11-13T10:25:59,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T10:25:59,481 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testReplayEditsAfterRegionMovedWithMultiCF completed 2024-11-13T10:25:59,481 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testReplayEditsAfterRegionMovedWithMultiCF get assigned. Timeout = 60000ms 2024-11-13T10:25:59,482 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T10:25:59,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned to meta. Checking AM states. 
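CreateTableProcedure pid=4 completes in 427 msec, the table is marked ENABLED in hbase:meta, and the client then waits for all regions to be assigned. A sketch of the equivalent client-side table creation, assuming the Admin API and the same two families; the descriptor dump in the open journal above shows VERSIONS => '1', which is reproduced here, and everything else is left at defaults. The busy-wait loop is a crude stand-in for the test utility's assignment wait.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateTestTable {
        // Create testReplayEditsAfterRegionMovedWithMultiCF with cf1 and cf2,
        // one version per cell, then wait until the table is usable.
        static void create(Admin admin) throws Exception {
            TableName tn =
                TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
            admin.createTable(TableDescriptorBuilder.newBuilder(tn)
                .setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("cf1")).setMaxVersions(1).build())
                .setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("cf2")).setMaxVersions(1).build())
                .build());
            while (!admin.isTableAvailable(tn)) {
                Thread.sleep(100);
            }
        }
    }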
2024-11-13T10:25:59,489 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T10:25:59,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned. 2024-11-13T10:25:59,503 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,44657,1731493541444, seqNum=2] 2024-11-13T10:25:59,520 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] master.HMaster(2410): Client=jenkins//172.17.0.2 move hri=70a598aa9b18017afa50633b8eb231df, source=770665a7984d,44657,1731493541444, destination=770665a7984d,36821,1731493541562, warming up region on 770665a7984d,36821,1731493541562 2024-11-13T10:25:59,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T10:25:59,523 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] master.HMaster(2414): Client=jenkins//172.17.0.2 move hri=70a598aa9b18017afa50633b8eb231df, source=770665a7984d,44657,1731493541444, destination=770665a7984d,36821,1731493541562, running balancer 2024-11-13T10:25:59,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, REOPEN/MOVE 2024-11-13T10:25:59,525 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, REOPEN/MOVE 2024-11-13T10:25:59,525 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58567, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T10:25:59,527 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=70a598aa9b18017afa50633b8eb231df, regionState=CLOSING, regionLocation=770665a7984d,44657,1731493541444 2024-11-13T10:25:59,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, REOPEN/MOVE because future has completed 2024-11-13T10:25:59,530 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T10:25:59,531 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 70a598aa9b18017afa50633b8eb231df, server=770665a7984d,44657,1731493541444}] 2024-11-13T10:25:59,531 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36821 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 
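The locator call above resolves row 'r1' to 770665a7984d,44657 with seqNum=2, and the master is then asked to move the region to 770665a7984d,36821, warming it up on the destination first. A minimal sketch of the same two client actions, assuming the Admin.move(encodedRegionName, ServerName) overload; the host, port and start-code values are copied from the log lines, the cell value is illustrative.

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class WriteThenMove {
        static void run(Connection conn, Admin admin) throws Exception {
            TableName tn =
                TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
            // Write the single cell that the first flush later persists as r1/cf1:q.
            try (Table table = conn.getTable(tn)) {
                table.put(new Put(Bytes.toBytes("r1"))
                    .addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("q"),
                               Bytes.toBytes("v1")));
            }
            // Ask the master to move the (single) region to the second region server.
            RegionInfo region = admin.getRegions(tn).get(0);
            admin.move(region.getEncodedNameAsBytes(),
                       ServerName.valueOf("770665a7984d", 36821, 1731493541562L));
        }
    }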
2024-11-13T10:25:59,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36821 {}] regionserver.HRegion(7855): Warmup {ENCODED => 70a598aa9b18017afa50633b8eb231df, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:25:59,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36821 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:25:59,532 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:25:59,534 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 70a598aa9b18017afa50633b8eb231df columnFamilyName cf1 2024-11-13T10:25:59,534 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:59,534 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(327): Store=70a598aa9b18017afa50633b8eb231df/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:59,534 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:25:59,535 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 70a598aa9b18017afa50633b8eb231df columnFamilyName cf2 2024-11-13T10:25:59,535 DEBUG 
[StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:25:59,536 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(327): Store=70a598aa9b18017afa50633b8eb231df/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:25:59,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36821 {}] regionserver.HRegion(1722): Closing 70a598aa9b18017afa50633b8eb231df, disabling compactions & flushes 2024-11-13T10:25:59,536 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36821 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:25:59,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36821 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:25:59,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36821 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. after waiting 0 ms 2024-11-13T10:25:59,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36821 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:25:59,537 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36821 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:25:59,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36821 {}] regionserver.HRegion(1676): Region close journal for 70a598aa9b18017afa50633b8eb231df: Waiting for close lock at 1731493559536Disabling compacts and flushes for region at 1731493559536Disabling writes for close at 1731493559536Writing region close event to WAL at 1731493559537 (+1 ms)Closed at 1731493559537 2024-11-13T10:25:59,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] procedure.ProcedureSyncWait(219): waitFor pid=7 2024-11-13T10:25:59,690 INFO [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(122): Close 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:25:59,690 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T10:25:59,691 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1722): Closing 70a598aa9b18017afa50633b8eb231df, disabling compactions & flushes 2024-11-13T10:25:59,691 INFO [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 
2024-11-13T10:25:59,691 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:25:59,691 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. after waiting 0 ms 2024-11-13T10:25:59,691 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:25:59,691 INFO [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(2902): Flushing 70a598aa9b18017afa50633b8eb231df 2/2 column families, dataSize=31 B heapSize=616 B 2024-11-13T10:25:59,709 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/.tmp/cf1/bcb939ad62e64737a5082da6cb4be4c6 is 35, key is r1/cf1:q/1731493559506/Put/seqid=0 2024-11-13T10:25:59,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741887_1065 (size=4783) 2024-11-13T10:25:59,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741887_1065 (size=4783) 2024-11-13T10:25:59,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741887_1065 (size=4783) 2024-11-13T10:25:59,720 INFO [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/.tmp/cf1/bcb939ad62e64737a5082da6cb4be4c6 2024-11-13T10:25:59,728 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/.tmp/cf1/bcb939ad62e64737a5082da6cb4be4c6 as hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf1/bcb939ad62e64737a5082da6cb4be4c6 2024-11-13T10:25:59,735 INFO [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf1/bcb939ad62e64737a5082da6cb4be4c6, entries=1, sequenceid=5, filesize=4.7 K 2024-11-13T10:25:59,736 INFO [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 
{event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 70a598aa9b18017afa50633b8eb231df in 45ms, sequenceid=5, compaction requested=false 2024-11-13T10:25:59,736 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testReplayEditsAfterRegionMovedWithMultiCF' 2024-11-13T10:25:59,742 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-13T10:25:59,744 INFO [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:25:59,745 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1676): Region close journal for 70a598aa9b18017afa50633b8eb231df: Waiting for close lock at 1731493559691Running coprocessor pre-close hooks at 1731493559691Disabling compacts and flushes for region at 1731493559691Disabling writes for close at 1731493559691Obtaining lock to block concurrent updates at 1731493559691Preparing flush snapshotting stores in 70a598aa9b18017afa50633b8eb231df at 1731493559691Finished memstore snapshotting testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., syncing WAL and waiting on mvcc, flushsize=dataSize=31, getHeapSize=584, getOffHeapSize=0, getCellsCount=1 at 1731493559691Flushing stores of testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 
at 1731493559692 (+1 ms)Flushing 70a598aa9b18017afa50633b8eb231df/cf1: creating writer at 1731493559692Flushing 70a598aa9b18017afa50633b8eb231df/cf1: appending metadata at 1731493559708 (+16 ms)Flushing 70a598aa9b18017afa50633b8eb231df/cf1: closing flushed file at 1731493559708Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e1fc953: reopening flushed file at 1731493559727 (+19 ms)Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 70a598aa9b18017afa50633b8eb231df in 45ms, sequenceid=5, compaction requested=false at 1731493559736 (+9 ms)Writing region close event to WAL at 1731493559737 (+1 ms)Running coprocessor post-close hooks at 1731493559742 (+5 ms)Closed at 1731493559744 (+2 ms) 2024-11-13T10:25:59,745 INFO [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionServer(3302): Adding 70a598aa9b18017afa50633b8eb231df move to 770665a7984d,36821,1731493541562 record at close sequenceid=5 2024-11-13T10:25:59,749 INFO [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(157): Closed 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:25:59,749 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=70a598aa9b18017afa50633b8eb231df, regionState=CLOSED 2024-11-13T10:25:59,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 70a598aa9b18017afa50633b8eb231df, server=770665a7984d,44657,1731493541444 because future has completed 2024-11-13T10:25:59,757 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-13T10:25:59,757 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; CloseRegionProcedure 70a598aa9b18017afa50633b8eb231df, server=770665a7984d,44657,1731493541444 in 223 msec 2024-11-13T10:25:59,758 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, REOPEN/MOVE; state=CLOSED, location=770665a7984d,36821,1731493541562; forceNewPlan=false, retain=false 2024-11-13T10:25:59,908 INFO [770665a7984d:45401 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
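After the close-time flush, the old server records "move to 770665a7984d,36821,... record at close sequenceid=5", the region goes CLOSED, and the balancer assigns it to the destination. A hedged sketch of how a client or test could wait for the region to actually land on the target server, assuming Admin.getRegions(ServerName); the server coordinates are taken from the log, the polling loop is illustrative.

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public final class WaitForMove {
        // Poll the destination server's online regions until the moved region shows up.
        static void awaitOnServer(Admin admin, String encodedRegionName)
                throws Exception {
            ServerName dest =
                ServerName.valueOf("770665a7984d", 36821, 1731493541562L);
            while (true) {
                for (RegionInfo ri : admin.getRegions(dest)) {
                    if (ri.getEncodedName().equals(encodedRegionName)) {
                        return;
                    }
                }
                Thread.sleep(100);
            }
        }
    }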
2024-11-13T10:25:59,909 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=70a598aa9b18017afa50633b8eb231df, regionState=OPENING, regionLocation=770665a7984d,36821,1731493541562 2024-11-13T10:25:59,913 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, REOPEN/MOVE because future has completed 2024-11-13T10:25:59,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 70a598aa9b18017afa50633b8eb231df, server=770665a7984d,36821,1731493541562}] 2024-11-13T10:26:00,072 INFO [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:00,072 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7752): Opening region: {ENCODED => 70a598aa9b18017afa50633b8eb231df, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:26:00,073 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:00,073 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:00,073 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7794): checking encryption for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:00,073 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7797): checking classloading for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:00,075 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:00,076 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 70a598aa9b18017afa50633b8eb231df columnFamilyName cf1 2024-11-13T10:26:00,076 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:00,083 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf1/bcb939ad62e64737a5082da6cb4be4c6 2024-11-13T10:26:00,083 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(327): Store=70a598aa9b18017afa50633b8eb231df/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:00,083 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:00,085 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 70a598aa9b18017afa50633b8eb231df columnFamilyName cf2 2024-11-13T10:26:00,085 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:00,085 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(327): Store=70a598aa9b18017afa50633b8eb231df/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:00,086 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1038): replaying wal for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:00,086 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:00,088 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:00,089 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1048): stopping wal replay for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:00,089 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1060): Cleaning up temporary data for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:00,090 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-11-13T10:26:00,092 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1093): writing seq id for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:00,093 INFO [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1114): Opened 70a598aa9b18017afa50633b8eb231df; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60385028, jitterRate=-0.10019296407699585}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-11-13T10:26:00,093 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:00,094 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1006): Region open journal for 70a598aa9b18017afa50633b8eb231df: Running coprocessor pre-open hook at 1731493560073Writing region info on filesystem at 1731493560073Initializing all the Stores at 1731493560074 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493560074Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493560075 (+1 ms)Cleaning up temporary data from old regions at 1731493560089 (+14 ms)Running coprocessor post-open hooks at 1731493560093 (+4 ms)Region opened successfully at 1731493560093 2024-11-13T10:26:00,095 INFO [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., pid=9, masterSystemTime=1731493560067 2024-11-13T10:26:00,098 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2266): Finished post open deploy task for 
testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:00,098 INFO [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:00,099 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=70a598aa9b18017afa50633b8eb231df, regionState=OPEN, openSeqNum=9, regionLocation=770665a7984d,36821,1731493541562 2024-11-13T10:26:00,102 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 70a598aa9b18017afa50633b8eb231df, server=770665a7984d,36821,1731493541562 because future has completed 2024-11-13T10:26:00,108 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-11-13T10:26:00,108 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; OpenRegionProcedure 70a598aa9b18017afa50633b8eb231df, server=770665a7984d,36821,1731493541562 in 191 msec 2024-11-13T10:26:00,111 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, REOPEN/MOVE in 585 msec 2024-11-13T10:26:00,131 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T10:26:00,133 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54908, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T10:26:00,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44657 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 139 connection: 172.17.0.2:49982 deadline: 1731493620137, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=770665a7984d port=36821 startCode=1731493541562. As of locationSeqNum=5. 2024-11-13T10:26:00,147 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,44657,1731493541444, seqNum=2 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,44657,1731493541444, seqNum=2, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=770665a7984d port=36821 startCode=1731493541562. As of locationSeqNum=5. 2024-11-13T10:26:00,148 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,44657,1731493541444, seqNum=2 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=770665a7984d port=36821 startCode=1731493541562. As of locationSeqNum=5. 
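The Mutate at callId 13 still targets the old server, receives RegionMovedException pointing at port 36821 with locationSeqNum=5, and AsyncRegionLocatorHelper replaces the cached location before the operation is retried, so this normally never surfaces to application code. The mutation in question is the family-wide delete whose markers are flushed next; a sketch under the assumption that both families of row r1 are deleted in one call:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class DeleteBothFamilies {
        // Issue the DeleteFamily markers for r1 in cf1 and cf2; the client library
        // transparently re-locates the region if it moved since the last lookup.
        static void deleteRow(Connection conn) throws Exception {
            TableName tn =
                TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
            try (Table table = conn.getTable(tn)) {
                table.delete(new Delete(Bytes.toBytes("r1"))
                    .addFamily(Bytes.toBytes("cf1"))
                    .addFamily(Bytes.toBytes("cf2")));
            }
        }
    }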
2024-11-13T10:26:00,148 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,44657,1731493541444, seqNum=2 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,36821,1731493541562, seqNum=5 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=770665a7984d port=36821 startCode=1731493541562. As of locationSeqNum=5. 2024-11-13T10:26:00,260 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T10:26:00,262 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54916, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T10:26:00,273 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 70a598aa9b18017afa50633b8eb231df 2/2 column families, dataSize=50 B heapSize=720 B 2024-11-13T10:26:00,297 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/.tmp/cf1/d6becb2d347f4a709d897e9233f7a2f8 is 29, key is r1/cf1:/1731493560263/DeleteFamily/seqid=0 2024-11-13T10:26:00,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741888_1066 (size=4906) 2024-11-13T10:26:00,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741888_1066 (size=4906) 2024-11-13T10:26:00,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741888_1066 (size=4906) 2024-11-13T10:26:00,319 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), to=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/.tmp/cf1/d6becb2d347f4a709d897e9233f7a2f8 2024-11-13T10:26:00,328 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d6becb2d347f4a709d897e9233f7a2f8 2024-11-13T10:26:00,347 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/.tmp/cf2/1d5c3675b2ed4f4597682b8e8bcfe621 is 29, key is r1/cf2:/1731493560263/DeleteFamily/seqid=0 2024-11-13T10:26:00,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741889_1067 (size=4906) 2024-11-13T10:26:00,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741889_1067 (size=4906) 2024-11-13T10:26:00,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741889_1067 (size=4906) 
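The test then flushes the region directly (HRegion(2902)), producing one ~4.8 K HFile per family that contains only the DeleteFamily marker. From a plain client the closest equivalent is an admin-triggered flush; a short sketch assuming Admin.flush(TableName):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class FlushTable {
        // Flush all memstores of the table so the delete markers reach HFiles
        // (the test calls the region flush directly; this is the client-side analogue).
        static void flush(Admin admin) throws Exception {
            admin.flush(TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"));
        }
    }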
2024-11-13T10:26:00,366 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), to=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/.tmp/cf2/1d5c3675b2ed4f4597682b8e8bcfe621 2024-11-13T10:26:00,372 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1d5c3675b2ed4f4597682b8e8bcfe621 2024-11-13T10:26:00,373 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/.tmp/cf1/d6becb2d347f4a709d897e9233f7a2f8 as hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf1/d6becb2d347f4a709d897e9233f7a2f8 2024-11-13T10:26:00,380 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d6becb2d347f4a709d897e9233f7a2f8 2024-11-13T10:26:00,381 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf1/d6becb2d347f4a709d897e9233f7a2f8, entries=1, sequenceid=12, filesize=4.8 K 2024-11-13T10:26:00,382 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/.tmp/cf2/1d5c3675b2ed4f4597682b8e8bcfe621 as hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf2/1d5c3675b2ed4f4597682b8e8bcfe621 2024-11-13T10:26:00,389 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1d5c3675b2ed4f4597682b8e8bcfe621 2024-11-13T10:26:00,389 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf2/1d5c3675b2ed4f4597682b8e8bcfe621, entries=1, sequenceid=12, filesize=4.8 K 2024-11-13T10:26:00,391 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~50 B/50, heapSize ~688 B/688, currentSize=0 B/0 for 70a598aa9b18017afa50633b8eb231df in 118ms, sequenceid=12, compaction requested=false 2024-11-13T10:26:00,391 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 70a598aa9b18017afa50633b8eb231df: 2024-11-13T10:26:00,395 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-13T10:26:00,396 DEBUG [Time-limited test {}] regionserver.HStore(1541): 70a598aa9b18017afa50633b8eb231df/cf1 is initiating major compaction (all files) 2024-11-13T10:26:00,397 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, 
higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T10:26:00,397 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T10:26:00,397 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 70a598aa9b18017afa50633b8eb231df/cf1 in testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:00,398 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf1/bcb939ad62e64737a5082da6cb4be4c6, hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf1/d6becb2d347f4a709d897e9233f7a2f8] into tmpdir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/.tmp, totalSize=9.5 K 2024-11-13T10:26:00,399 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting bcb939ad62e64737a5082da6cb4be4c6, keycount=1, bloomtype=NONE, size=4.7 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731493559506 2024-11-13T10:26:00,399 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting d6becb2d347f4a709d897e9233f7a2f8, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-11-13T10:26:00,412 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 70a598aa9b18017afa50633b8eb231df#cf1#compaction#16 average throughput is 0.00 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T10:26:00,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741890_1068 (size=4626) 2024-11-13T10:26:00,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741890_1068 (size=4626) 2024-11-13T10:26:00,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741890_1068 (size=4626) 2024-11-13T10:26:00,426 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/.tmp/cf1/6cdd26a37bcd48f98aea20841c0fab48 as hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf1/6cdd26a37bcd48f98aea20841c0fab48 2024-11-13T10:26:00,440 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 2 (all) file(s) in 70a598aa9b18017afa50633b8eb231df/cf1 of 70a598aa9b18017afa50633b8eb231df into 6cdd26a37bcd48f98aea20841c0fab48(size=4.5 K), total size for store is 4.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
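cf1 is then major-compacted: the put file at seqNum=5 and the delete-marker file at seqNum=12 are rewritten into a single 4.5 K file, with PressureAwareCompactionThroughputController bounding throughput between 50 and 100 MB/s. A sketch of requesting the same compaction from a client and of those bounds, assuming Admin.majorCompact(TableName, byte[]) and the hbase.hstore.compaction.throughput.* keys; the key names are my assumption, only the bound values appear in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CompactCf1 {
        static void majorCompact(Admin admin) throws Exception {
            // Asynchronously request a major compaction of cf1 only.
            admin.majorCompact(
                TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"),
                Bytes.toBytes("cf1"));
        }

        static Configuration throttledConf() {
            // Assumed configuration keys for the pressure-aware throughput controller;
            // values mirror the 100 MB/s upper and 50 MB/s lower bounds in the log.
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound",
                         100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound",
                         50L * 1024 * 1024);
            return conf;
        }
    }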
2024-11-13T10:26:00,440 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 70a598aa9b18017afa50633b8eb231df: 2024-11-13T10:26:00,441 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-13T10:26:00,441 DEBUG [Time-limited test {}] regionserver.HStore(1541): 70a598aa9b18017afa50633b8eb231df/cf2 is initiating major compaction (all files) 2024-11-13T10:26:00,441 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T10:26:00,441 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T10:26:00,441 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 70a598aa9b18017afa50633b8eb231df/cf2 in testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:00,441 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf2/1d5c3675b2ed4f4597682b8e8bcfe621] into tmpdir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/.tmp, totalSize=4.8 K 2024-11-13T10:26:00,442 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 1d5c3675b2ed4f4597682b8e8bcfe621, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-11-13T10:26:00,448 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 70a598aa9b18017afa50633b8eb231df#cf2#compaction#17 average throughput is NaN MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T10:26:00,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741891_1069 (size=4592) 2024-11-13T10:26:00,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741891_1069 (size=4592) 2024-11-13T10:26:00,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741891_1069 (size=4592) 2024-11-13T10:26:00,470 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/.tmp/cf2/2882fc4cc2c04ac0a353e84c3a9e789c as hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf2/2882fc4cc2c04ac0a353e84c3a9e789c 2024-11-13T10:26:00,477 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 1 (all) file(s) in 70a598aa9b18017afa50633b8eb231df/cf2 of 70a598aa9b18017afa50633b8eb231df into 2882fc4cc2c04ac0a353e84c3a9e789c(size=4.5 K), total size for store is 4.5 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T10:26:00,477 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 70a598aa9b18017afa50633b8eb231df: 2024-11-13T10:26:00,482 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] master.HMaster(2410): Client=jenkins//172.17.0.2 move hri=70a598aa9b18017afa50633b8eb231df, source=770665a7984d,36821,1731493541562, destination=770665a7984d,44657,1731493541444, warming up region on 770665a7984d,44657,1731493541444 2024-11-13T10:26:00,482 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] master.HMaster(2414): Client=jenkins//172.17.0.2 move hri=70a598aa9b18017afa50633b8eb231df, source=770665a7984d,36821,1731493541562, destination=770665a7984d,44657,1731493541444, running balancer 2024-11-13T10:26:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] procedure2.ProcedureExecutor(1139): Stored pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, REOPEN/MOVE 2024-11-13T10:26:00,484 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, REOPEN/MOVE 2024-11-13T10:26:00,486 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44657 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:00,486 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=70a598aa9b18017afa50633b8eb231df, regionState=CLOSING, regionLocation=770665a7984d,36821,1731493541562 2024-11-13T10:26:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44657 {}] regionserver.HRegion(7855): Warmup {ENCODED => 70a598aa9b18017afa50633b8eb231df, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:26:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44657 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:00,487 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:00,488 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window 
factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 70a598aa9b18017afa50633b8eb231df columnFamilyName cf1 2024-11-13T10:26:00,488 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:00,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, REOPEN/MOVE because future has completed 2024-11-13T10:26:00,490 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T10:26:00,490 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 70a598aa9b18017afa50633b8eb231df, server=770665a7984d,36821,1731493541562}] 2024-11-13T10:26:00,496 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf1/6cdd26a37bcd48f98aea20841c0fab48 2024-11-13T10:26:00,501 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf1/bcb939ad62e64737a5082da6cb4be4c6 2024-11-13T10:26:00,507 INFO [StoreFileOpener-70a598aa9b18017afa50633b8eb231df-cf1-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d6becb2d347f4a709d897e9233f7a2f8 2024-11-13T10:26:00,507 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf1/d6becb2d347f4a709d897e9233f7a2f8 2024-11-13T10:26:00,507 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(327): Store=70a598aa9b18017afa50633b8eb231df/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:00,508 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:00,509 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for 
tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 70a598aa9b18017afa50633b8eb231df columnFamilyName cf2 2024-11-13T10:26:00,509 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:00,515 INFO [StoreFileOpener-70a598aa9b18017afa50633b8eb231df-cf2-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1d5c3675b2ed4f4597682b8e8bcfe621 2024-11-13T10:26:00,515 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf2/1d5c3675b2ed4f4597682b8e8bcfe621 2024-11-13T10:26:00,519 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf2/2882fc4cc2c04ac0a353e84c3a9e789c 2024-11-13T10:26:00,520 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(327): Store=70a598aa9b18017afa50633b8eb231df/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:00,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44657 {}] regionserver.HRegion(1722): Closing 70a598aa9b18017afa50633b8eb231df, disabling compactions & flushes 2024-11-13T10:26:00,520 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44657 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:00,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44657 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:00,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44657 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. after waiting 0 ms 2024-11-13T10:26:00,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44657 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:00,521 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44657 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 
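The records above show the master handling a client-initiated move of region 70a598aa9b18017afa50633b8eb231df from 770665a7984d,36821 to 770665a7984d,44657: the destination warms the region up (opening both column-family stores) before the source is asked to close it. A minimal sketch, assuming an illustrative configuration, table lookup, and destination ServerName string (none of this is taken from the actual test code), of requesting such a move through the public HBase Admin API:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class MoveRegionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
      // The table has a single region in this test; in the log its encoded name is
      // 70a598aa9b18017afa50633b8eb231df.
      RegionInfo region = admin.getRegions(table).get(0);
      // Destination in host,port,startcode form, as it appears in the log.
      ServerName dest = ServerName.valueOf("770665a7984d,44657,1731493541444");
      // The master responds with the warmup plus the TransitRegionStateProcedure
      // (REOPEN/MOVE) sequence recorded above.
      admin.move(region.getEncodedNameAsBytes(), dest);
    }
  }
}
```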
2024-11-13T10:26:00,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44657 {}] regionserver.HRegion(1676): Region close journal for 70a598aa9b18017afa50633b8eb231df: Waiting for close lock at 1731493560520Disabling compacts and flushes for region at 1731493560520Disabling writes for close at 1731493560520Writing region close event to WAL at 1731493560521 (+1 ms)Closed at 1731493560521 2024-11-13T10:26:00,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] procedure.ProcedureSyncWait(219): waitFor pid=10 2024-11-13T10:26:00,645 INFO [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(122): Close 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:00,645 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T10:26:00,645 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1722): Closing 70a598aa9b18017afa50633b8eb231df, disabling compactions & flushes 2024-11-13T10:26:00,645 INFO [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:00,645 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:00,645 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. after waiting 0 ms 2024-11-13T10:26:00,645 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:00,646 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf1/bcb939ad62e64737a5082da6cb4be4c6, hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf1/d6becb2d347f4a709d897e9233f7a2f8] to archive 2024-11-13T10:26:00,650 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-13T10:26:00,654 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf1/bcb939ad62e64737a5082da6cb4be4c6 to hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf1/bcb939ad62e64737a5082da6cb4be4c6 2024-11-13T10:26:00,656 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf1/d6becb2d347f4a709d897e9233f7a2f8 to hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf1/d6becb2d347f4a709d897e9233f7a2f8 2024-11-13T10:26:00,670 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf2/1d5c3675b2ed4f4597682b8e8bcfe621] to archive 2024-11-13T10:26:00,672 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-13T10:26:00,673 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf2/1d5c3675b2ed4f4597682b8e8bcfe621 to hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf2/1d5c3675b2ed4f4597682b8e8bcfe621 2024-11-13T10:26:00,679 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=8 2024-11-13T10:26:00,680 INFO [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 
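The StoreCloser records above relocate compacted store files from the region's data directory to the matching path under archive/. The sketch below is only a conceptual stand-in for what HBase's HFileArchiver does, written against the plain Hadoop FileSystem API; the paths and error handling are simplified assumptions.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveStoreFileSketch {
  // Conceptual only: the real HFileArchiver adds existence checks, retries and
  // collision handling. 'archiveDir' would mirror the data path under .../archive/.
  static void archive(Configuration conf, Path storeFile, Path archiveDir) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!fs.exists(archiveDir)) {
      fs.mkdirs(archiveDir);
    }
    Path target = new Path(archiveDir, storeFile.getName());
    // On HDFS a rename only updates namespace metadata; the HFile blocks stay put.
    if (!fs.rename(storeFile, target)) {
      throw new IOException("could not archive " + storeFile + " to " + target);
    }
  }
}
```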
2024-11-13T10:26:00,680 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1676): Region close journal for 70a598aa9b18017afa50633b8eb231df: Waiting for close lock at 1731493560645Running coprocessor pre-close hooks at 1731493560645Disabling compacts and flushes for region at 1731493560645Disabling writes for close at 1731493560645Writing region close event to WAL at 1731493560675 (+30 ms)Running coprocessor post-close hooks at 1731493560680 (+5 ms)Closed at 1731493560680 2024-11-13T10:26:00,680 INFO [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegionServer(3302): Adding 70a598aa9b18017afa50633b8eb231df move to 770665a7984d,44657,1731493541444 record at close sequenceid=12 2024-11-13T10:26:00,683 INFO [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(157): Closed 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:00,684 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=70a598aa9b18017afa50633b8eb231df, regionState=CLOSED 2024-11-13T10:26:00,687 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 70a598aa9b18017afa50633b8eb231df, server=770665a7984d,36821,1731493541562 because future has completed 2024-11-13T10:26:00,690 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=10 2024-11-13T10:26:00,690 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=10, state=SUCCESS, hasLock=false; CloseRegionProcedure 70a598aa9b18017afa50633b8eb231df, server=770665a7984d,36821,1731493541562 in 198 msec 2024-11-13T10:26:00,691 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, REOPEN/MOVE; state=CLOSED, location=770665a7984d,44657,1731493541444; forceNewPlan=false, retain=false 2024-11-13T10:26:00,842 INFO [770665a7984d:45401 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-13T10:26:00,842 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=70a598aa9b18017afa50633b8eb231df, regionState=OPENING, regionLocation=770665a7984d,44657,1731493541444 2024-11-13T10:26:00,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, REOPEN/MOVE because future has completed 2024-11-13T10:26:00,845 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 70a598aa9b18017afa50633b8eb231df, server=770665a7984d,44657,1731493541444}] 2024-11-13T10:26:01,002 INFO [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 
2024-11-13T10:26:01,002 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 70a598aa9b18017afa50633b8eb231df, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:26:01,003 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:01,003 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:01,003 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:01,003 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:01,005 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:01,006 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 70a598aa9b18017afa50633b8eb231df columnFamilyName cf1 2024-11-13T10:26:01,006 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:01,012 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf1/6cdd26a37bcd48f98aea20841c0fab48 2024-11-13T10:26:01,012 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(327): Store=70a598aa9b18017afa50633b8eb231df/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:01,013 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:01,014 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 70a598aa9b18017afa50633b8eb231df columnFamilyName cf2 2024-11-13T10:26:01,014 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:01,021 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf2/2882fc4cc2c04ac0a353e84c3a9e789c 2024-11-13T10:26:01,022 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(327): Store=70a598aa9b18017afa50633b8eb231df/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:01,022 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:01,023 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:01,024 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:01,025 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:01,025 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:01,026 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 
2024-11-13T10:26:01,027 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:01,028 INFO [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 70a598aa9b18017afa50633b8eb231df; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70065542, jitterRate=0.0440579354763031}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-11-13T10:26:01,028 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:01,029 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 70a598aa9b18017afa50633b8eb231df: Running coprocessor pre-open hook at 1731493561003Writing region info on filesystem at 1731493561003Initializing all the Stores at 1731493561004 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493561004Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493561004Cleaning up temporary data from old regions at 1731493561025 (+21 ms)Running coprocessor post-open hooks at 1731493561028 (+3 ms)Region opened successfully at 1731493561029 (+1 ms) 2024-11-13T10:26:01,030 INFO [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., pid=12, masterSystemTime=1731493560998 2024-11-13T10:26:01,033 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:01,033 INFO [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 
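Once the region is reopened on 770665a7984d,44657 with next sequenceid=18, any location a client cached before the move is stale. A small sketch, where the row key and configuration are the only assumptions, of forcing a fresh lookup through RegionLocator so the answer reflects the new server and sequence number written to hbase:meta:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateAfterMoveSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(
             TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))) {
      // reload=true bypasses the client-side location cache, so this returns the
      // destination server rather than the pre-move location.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("anyRow"), true);
      System.out.println(loc.getServerName() + " seqNum=" + loc.getSeqNum());
    }
  }
}
```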
2024-11-13T10:26:01,034 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=70a598aa9b18017afa50633b8eb231df, regionState=OPEN, openSeqNum=18, regionLocation=770665a7984d,44657,1731493541444 2024-11-13T10:26:01,036 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 70a598aa9b18017afa50633b8eb231df, server=770665a7984d,44657,1731493541444 because future has completed 2024-11-13T10:26:01,040 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-13T10:26:01,040 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 70a598aa9b18017afa50633b8eb231df, server=770665a7984d,44657,1731493541444 in 193 msec 2024-11-13T10:26:01,043 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, REOPEN/MOVE in 558 msec 2024-11-13T10:26:01,063 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF 2024-11-13T10:26:01,063 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF Metrics about Tables on a single HBase RegionServer 2024-11-13T10:26:01,064 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly 2024-11-13T10:26:01,064 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly Metrics about Tables on a single HBase RegionServer 2024-11-13T10:26:01,065 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum 2024-11-13T10:26:01,066 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum Metrics about Tables on a single HBase RegionServer 2024-11-13T10:26:01,086 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T10:26:01,088 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36438, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T10:26:01,090 ERROR [Time-limited test {}] regionserver.HRegionServer(2442): ***** ABORTING region server 770665a7984d,44657,1731493541444: testing ***** 2024-11-13T10:26:01,090 ERROR [Time-limited test {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-11-13T10:26:01,093 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-11-13T10:26:01,095 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans 
for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-11-13T10:26:01,100 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-11-13T10:26:01,102 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-11-13T10:26:01,119 INFO [Time-limited test {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 343070144 }, "NonHeapMemoryUsage": { "committed": 172032000, "init": 7667712, "max": -1, "used": 169405472 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=IPC", "modelerType": "RegionServer,sub=IPC", "tag.Context": "regionserver", "tag.Hostname": "770665a7984d", "queueSize": 0, "numCallsInGeneralQueue": 0, "numCallsInReplicationQueue": 0, "numCallsInBulkLoadQueue": 0, "numCallsInPriorityQueue": 0, "numCallsInMetaPriorityQueue": 0, "numOpenConnections": 4, "numActiveHandler": 0, "numActiveGeneralHandler": 0, "numActivePriorityHandler": 0, "numActiveReplicationHandler": 0, "numGeneralCallsDropped": 0, "numLifoModeSwitches": 0, "numCallsInWriteQueue": 0, "numActiveBulkLoadHandler": 0, "numCallsInReadQueue": 0, "numCallsInScanQueue": 0, "numActiveWriteHandler": 0, "numActiveReadHandler": 0, "numActiveScanHandler": 0, "nettyDirectMemoryUsage": 67108864, "nettyTotalPendingOutboundBytes": 0, "nettyMaxPendingOutboundBytes": 0, "receivedBytes": 10271, "exceptions.RegionMovedException": 1, "authenticationSuccesses": 0, "authorizationFailures": 0, "exceptions.requestTooBig": 0, "UnwritableTime_num_ops": 0, "UnwritableTime_min": 0, "UnwritableTime_max": 0, "UnwritableTime_mean": 0, "UnwritableTime_25th_percentile": 0, "UnwritableTime_median": 0, "UnwritableTime_75th_percentile": 0, "UnwritableTime_90th_percentile": 0, "UnwritableTime_95th_percentile": 0, "UnwritableTime_98th_percentile": 0, "UnwritableTime_99th_percentile": 0, "UnwritableTime_99.9th_percentile": 0, "exceptions.OutOfOrderScannerNextException": 0, "exceptions.rpcThrottling": 0, "exceptions.otherExceptions": 0, "ProcessCallTime_num_ops": 39, "ProcessCallTime_min": 1, "ProcessCallTime_max": 38, "ProcessCallTime_mean": 3, "ProcessCallTime_25th_percentile": 1, "ProcessCallTime_median": 2, "ProcessCallTime_75th_percentile": 3, "ProcessCallTime_90th_percentile": 5, "ProcessCallTime_95th_percentile": 12, "ProcessCallTime_98th_percentile": 38, "ProcessCallTime_99th_percentile": 38, "ProcessCallTime_99.9th_percentile": 38, "ProcessCallTime_TimeRangeCount_0-1": 11, "ProcessCallTime_TimeRangeCount_1-3": 13, "ProcessCallTime_TimeRangeCount_3-10": 5, "ProcessCallTime_TimeRangeCount_10-30": 1, "ProcessCallTime_TimeRangeCount_30-100": 1, "exceptions.callQueueTooBig": 0, "QueueCallTime_num_ops": 39, "QueueCallTime_min": 0, "QueueCallTime_max": 1, "QueueCallTime_mean": 0, "QueueCallTime_25th_percentile": 0, "QueueCallTime_median": 0, "QueueCallTime_75th_percentile": 0, "QueueCallTime_90th_percentile": 0, "QueueCallTime_95th_percentile": 0, "QueueCallTime_98th_percentile": 1, "QueueCallTime_99th_percentile": 1, "QueueCallTime_99.9th_percentile": 1, "QueueCallTime_TimeRangeCount_0-1": 31, "authenticationFailures": 0, "exceptions.multiResponseTooLarge": 0, "exceptions.callDropped": 0, "TotalCallTime_num_ops": 
39, "TotalCallTime_min": 1, "TotalCallTime_max": 38, "TotalCallTime_mean": 3, "TotalCallTime_25th_percentile": 1, "TotalCallTime_median": 2, "TotalCallTime_75th_percentile": 3, "TotalCallTime_90th_percentile": 5, "TotalCallTime_95th_percentile": 12, "TotalCallTime_98th_percentile": 38, "TotalCallTime_99th_percentile": 38, "TotalCallTime_99.9th_percentile": 38, "TotalCallTime_TimeRangeCount_0-1": 10, "TotalCallTime_TimeRangeCount_1-3": 14, "TotalCallTime_TimeRangeCount_3-10": 5, "TotalCallTime_TimeRangeCount_10-30": 1, "TotalCallTime_TimeRangeCount_30-100": 1, "exceptions.RegionTooBusyException": 0, "exceptions.FailedSanityCheckException": 0, "ResponseSize_num_ops": 39, "ResponseSize_min": 0, "ResponseSize_max": 1139, "ResponseSize_mean": 160, "ResponseSize_25th_percentile": 2, "ResponseSize_median": 2, "ResponseSize_75th_percentile": 74, "ResponseSize_90th_percentile": 453, "ResponseSize_95th_percentile": 796, "ResponseSize_98th_percentile": 1001, "ResponseSize_99th_percentile": 1070, "ResponseSize_99.9th_percentile": 1132, "ResponseSize_SizeRangeCount_0-10": 22, "ResponseSize_SizeRangeCount_10-100": 4, "ResponseSize_SizeRangeCount_100-1000": 5, "exceptions.UnknownScannerException": 0, "exceptions": 1, "maxOutboundBytesExceeded": 0, "authenticationFallbacks": 0, "exceptions.quotaExceeded": 0, "exceptions.callTimedOut": 0, "exceptions.NotServingRegionException": 0, "authorizationSuccesses": 0, "exceptions.ScannerResetException": 0, "RequestSize_num_ops": 39, "RequestSize_min": 31, "RequestSize_max": 390, "RequestSize_mean": 204, "RequestSize_25th_percentile": 119, "RequestSize_median": 179, "RequestSize_75th_percentile": 330, "RequestSize_90th_percentile": 330, "RequestSize_95th_percentile": 365, "RequestSize_98th_percentile": 380, "RequestSize_99th_percentile": 385, "RequestSize_99.9th_percentile": 389, "RequestSize_SizeRangeCount_0-10": 2, "RequestSize_SizeRangeCount_100-1000": 29, "sentBytes": 5781 } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Replication", "modelerType": "RegionServer,sub=Replication", "tag.Context": "regionserver", "tag.Hostname": "770665a7984d", "source.shippedHFiles": 0, "Source.ageOfLastShippedOp_num_ops": 0, "Source.ageOfLastShippedOp_min": 0, "Source.ageOfLastShippedOp_max": 0, "Source.ageOfLastShippedOp_mean": 0, "Source.ageOfLastShippedOp_25th_percentile": 0, "Source.ageOfLastShippedOp_median": 0, "Source.ageOfLastShippedOp_75th_percentile": 0, "Source.ageOfLastShippedOp_90th_percentile": 0, "Source.ageOfLastShippedOp_95th_percentile": 0, "Source.ageOfLastShippedOp_98th_percentile": 0, "Source.ageOfLastShippedOp_99th_percentile": 0, "Source.ageOfLastShippedOp_99.9th_percentile": 0, "source.uncleanlyClosedLogs": 0, "source.closedLogsWithUnknownFileLength": 0, "source.walReaderEditsBufferUsage": 0, "source.repeatedLogFileBytes": 0, "source.sizeOfHFileRefsQueue": 0, "source.logReadInBytes": 0, "source.completedRecoverQueues": 0, "source.sizeOfLogQueue": 0, "source.restartedLogReading": 0, "source.failedRecoverQueues": 0, "source.ignoredUncleanlyClosedLogContentsInBytes": 0, "Sink.ageOfLastAppliedOp_num_ops": 0, "Sink.ageOfLastAppliedOp_min": 0, "Sink.ageOfLastAppliedOp_max": 0, "Sink.ageOfLastAppliedOp_mean": 0, "Sink.ageOfLastAppliedOp_25th_percentile": 0, "Sink.ageOfLastAppliedOp_median": 0, "Sink.ageOfLastAppliedOp_75th_percentile": 0, "Sink.ageOfLastAppliedOp_90th_percentile": 0, "Sink.ageOfLastAppliedOp_95th_percentile": 0, "Sink.ageOfLastAppliedOp_98th_percentile": 0, "Sink.ageOfLastAppliedOp_99th_percentile": 0, 
"Sink.ageOfLastAppliedOp_99.9th_percentile": 0, "source.logEditsRead": 0, "source.numInitializing": 0, "source.shippedOps": 0, "sink.appliedHFiles": 0, "source.logEditsFiltered": 0, "source.shippedBytes": 0, "sink.appliedOps": 0, "source.completedLogs": 0, "source.failedBatches": 0, "sink.failedBatches": 0, "source.shippedBatches": 0, "sink.appliedBatches": 0 } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Server", "modelerType": "RegionServer,sub=Server", "tag.zookeeperQuorum": "127.0.0.1:51925", "tag.serverName": "770665a7984d,36821,1731493541562", "tag.clusterId": "b9fd5f8e-ec8b-4cb8-95d4-350ee72f0575", "tag.Context": "regionserver", "tag.Hostname": "770665a7984d", "regionCount": 0, "storeCount": 0, "hlogFileCount": 3, "hlogFileSize": 0, "storeFileCount": 0, "maxStoreFileCount": 0, "memStoreSize": 0, "memStoreHeapSize": 0, "memStoreOffHeapSize": 0, "storeFileSize": 0, "storeFileSizeGrowthRate": 0.0, "maxStoreFileAge": 0, "minStoreFileAge": 0, "avgStoreFileAge": 0, "numReferenceFiles": 0, "regionServerStartTime": 1731493541562, "averageRegionSize": 0, "storeFileIndexSize": 0, "staticIndexSize": 0, "staticBloomSize": 0, "bloomFilterRequestsCount": 0, "bloomFilterNegativeResultsCount": 0, "bloomFilterEligibleRequestsCount": 0, "mutationsWithoutWALCount": 0, "mutationsWithoutWALSize": 0, "percentFilesLocal": 0.0, "percentFilesLocalSecondaryRegions": 0.0, "totalBytesRead": 320685, "localBytesRead": 320685, "shortCircuitBytesRead": 0, "zeroCopyBytesRead": 0, "splitQueueLength": 0, "compactionQueueLength": 0, "smallCompactionQueueLength": 0, "largeCompactionQueueLength": 0, "flushQueueLength": 0, "blockCacheFreeSize": 922070024, "blockCacheCount": 0, "blockCacheDataBlockCount": 0, "blockCacheSize": 676856, "blockCacheCountHitPercent": 25.0, "blockCacheExpressHitPercent": 0.0, "l1CacheSize": 676856, "l1CacheFreeSize": 922070024, "l1CacheCount": 0, "l1CacheEvictionCount": 0, "l1CacheHitCount": 1, "l1CacheMissCount": 3, "l1CacheHitRatio": 0.25, "l1CacheMissRatio": 0.75, "l2CacheSize": 0, "l2CacheFreeSize": 0, "l2CacheCount": 0, "l2CacheEvictionCount": 0, "l2CacheHitCount": 0, "l2CacheMissCount": 0, "l2CacheHitRatio": 0.0, "l2CacheMissRatio": 0.0, "mobFileCacheCount": 0, "mobFileCacheHitPercent": 0.0, "readRequestRatePerSecond": 0.0, "writeRequestRatePerSecond": 0.0, "ByteBuffAllocatorHeapAllocationBytes": 227435, "ByteBuffAllocatorPoolAllocationBytes": 0, "ByteBuffAllocatorHeapAllocationRatio": 1.0, "ByteBuffAllocatorTotalBufferCount": 186, "ByteBuffAllocatorUsedBufferCount": 0, "activeScanners": 0, "totalRequestCount": 3, "totalRowActionRequestCount": 0, "readRequestCount": 0, "cpRequestCount": 0, "filteredReadRequestCount": 0, "writeRequestCount": 0, "rpcGetRequestCount": 0, "rpcFullScanRequestCount": 1, "rpcScanRequestCount": 1, "rpcMultiRequestCount": 0, "rpcMutateRequestCount": 1, "checkMutateFailedCount": 0, "checkMutatePassedCount": 0, "blockCacheHitCount": 1, "blockCacheHitCountPrimary": 1, "blockCacheHitCachingCount": 0, "blockCacheMissCount": 3, "blockCacheMissCountPrimary": 3, "blockCacheMissCachingCount": 1, "blockCacheEvictionCount": 0, "blockCacheEvictionCountPrimary": 0, "blockCacheFailedInsertionCount": 0, "blockCacheDataMissCount": 3, "blockCacheLeafIndexMissCount": 0, "blockCacheBloomChunkMissCount": 0, "blockCacheMetaMissCount": 0, "blockCacheRootIndexMissCount": 0, "blockCacheIntermediateIndexMissCount": 0, "blockCacheFileInfoMissCount": 0, "blockCacheGeneralBloomMetaMissCount": 0, "blockCacheDeleteFamilyBloomMissCount": 0, "blockCacheTrailerMissCount": 
0, "blockCacheDataHitCount": 1, "blockCacheLeafIndexHitCount": 0, "blockCacheBloomChunkHitCount": 0, "blockCacheMetaHitCount": 0, "blockCacheRootIndexHitCount": 0, "blockCacheIntermediateIndexHitCount": 0, "blockCacheFileInfoHitCount": 0, "blockCacheGeneralBloomMetaHitCount": 0, "blockCacheDeleteFamilyBloomHitCount": 0, "blockCacheTrailerHitCount": 0, "updatesBlockedTime": 0, "flushedCellsCount": 0, "compactedCellsCount": 0, "majorCompactedCellsCount": 0, "flushedCellsSize": 0, "compactedCellsSize": 0, "majorCompactedCellsSize": 0, "cellsCountCompactedFromMob": 0, "cellsCountCompactedToMob": 0, "cellsSizeCompactedFromMob": 0, "cellsSizeCompactedToMob": 0, "mobFlushCount": 0, "mobFlushedCellsCount": 0, "mobFlushedCellsSize": 0, "mobScanCellsCount": 0, "mobScanCellsSize": 0, "mobFileCacheAccessCount": 0, "mobFileCacheMissCount": 0, "mobFileCacheEvictedCount": 0, "hedgedReads": 0, "hedgedReadWins": 0, "hedgedReadOpsInCurThread": 0, "blockedRequestCount": 0, "CheckAndMutate_num_ops": 0, "CheckAndMutate_min": 0, "CheckAndMutate_max": 0, "CheckAndMutate_mean": 0, "CheckAndMutate_25th_percentile": 0, "CheckAndMutate_median": 0, "CheckAndMutate_75th_percentile": 0, "CheckAndMutate_90th_percentile": 0, "CheckAndMutate_95th_percentile": 0, "CheckAndMutate_98th_percentile": 0, "CheckAndMutate_99th_percentile": 0, "CheckAndMutate_99.9th_percentile": 0, "MajorCompactionTime_num_ops": 2, "MajorCompactionTime_min": 35, "MajorCompactionTime_max": 41, "MajorCompactionTime_mean": 38, "MajorCompactionTime_25th_percentile": 35, "MajorCompactionTime_median": 36, "MajorCompactionTime_75th_percentile": 41, "MajorCompactionTime_90th_percentile": 41, "MajorCompactionTime_95th_percentile": 41, "MajorCompactionTime_98th_percentile": 41, "MajorCompactionTime_99th_percentile": 41, "MajorCompactionTime_99.9th_percentile": 41, "MajorCompactionTime_TimeRangeCount_30-100": 2, "ScanTime_num_ops": 1, "ScanTime_min": 1, "ScanTime_max": 1, "ScanTime_mean": 1, "ScanTime_25th_percentile": 1, "ScanTime_median": 1, "ScanTime_75th_percentile": 1, "ScanTime_90th_percentile": 1, "ScanTime_95th_percentile": 1, "ScanTime_98th_percentile": 1, "ScanTime_99th_percentile": 1, "ScanTime_99.9th_percentile": 1, "ScanTime_TimeRangeCount_0-1": 1, "CheckAndMutateBlockBytesScanned_num_ops": 0, "CheckAndMutateBlockBytesScanned_min": 0, "CheckAndMutateBlockBytesScanned_max": 0, "CheckAndMutateBlockBytesScanned_mean": 0, "CheckAndMutateBlockBytesScanned_25th_percentile": 0, "CheckAndMutateBlockBytesScanned_median": 0, "CheckAndMutateBlockBytesScanned_75th_percentile": 0, "CheckAndMutateBlockBytesScanned_90th_percentile": 0, "CheckAndMutateBlockBytesScanned_95th_percentile": 0, "CheckAndMutateBlockBytesScanned_98th_percentile": 0, "CheckAndMutateBlockBytesScanned_99th_percentile": 0, "CheckAndMutateBlockBytesScanned_99.9th_percentile": 0, "Put_num_ops": 0, "Put_min": 0, "Put_max": 0, "Put_mean": 0, "Put_25th_percentile": 0, "Put_median": 0, "Put_75th_percentile": 0, "Put_90th_percentile": 0, "Put_95th_percentile": 0, "Put_98th_percentile": 0, "Put_99th_percentile": 0, "Put_99.9th_percentile": 0, "splitRequestCount": 0, "AppendBlockBytesScanned_num_ops": 0, "AppendBlockBytesScanned_min": 0, "AppendBlockBytesScanned_max": 0, "AppendBlockBytesScanned_mean": 0, "AppendBlockBytesScanned_25th_percentile": 0, "AppendBlockBytesScanned_median": 0, "AppendBlockBytesScanned_75th_percentile": 0, "AppendBlockBytesScanned_90th_percentile": 0, "AppendBlockBytesScanned_95th_percentile": 0, "AppendBlockBytesScanned_98th_percentile": 0, 
"AppendBlockBytesScanned_99th_percentile": 0, "AppendBlockBytesScanned_99.9th_percentile": 0, "PutBatch_num_ops": 0, "PutBatch_min": 0, "PutBatch_max": 0, "PutBatch_mean": 0, "PutBatch_25th_percentile": 0, "PutBatch_median": 0, "PutBatch_75th_percentile": 0, "PutBatch_90th_percentile": 0, "PutBatch_95th_percentile": 0, "PutBatch_98th_percentile": 0, "PutBatch_99th_percentile": 0, "PutBatch_99.9th_percentile": 0, "IncrementBlockBytesScanned_num_ops": 0, "IncrementBlockBytesScanned_min": 0, "IncrementBlockBytesScanned_max": 0, "IncrementBlockBytesScanned_mean": 0, "IncrementBlockBytesScanned_25th_percentile": 0, "IncrementBlockBytesScanned_median": 0, "IncrementBlockBytesScanned_75th_percentile": 0, "IncrementBlockBytesScanned_90th_percentile": 0, "IncrementBlockBytesScanned_95th_percentile": 0, "IncrementBlockBytesScanned_98th_percentile": 0, "IncrementBlockBytesScanned_99th_percentile": 0, "IncrementBlockBytesScanned_99.9th_percentile": 0, "SplitTime_num_ops": 0, "SplitTime_min": 0, "SplitTime_max": 0, "SplitTime_mean": 0, "SplitTime_25th_percentile": 0, "SplitTime_median": 0, "SplitTime_75th_percentile": 0, "SplitTime_90th_percentile": 0, "SplitTime_95th_percentile": 0, "SplitTime_98th_percentile": 0, "SplitTime_99th_percentile": 0, "SplitTime_99.9th_percentile": 0, "GetBlockBytesScanned_num_ops": 0, "GetBlockBytesScanned_min": 0, "GetBlockBytesScanned_max": 0, "GetBlockBytesScanned_mean": 0, "GetBlockBytesScanned_25th_percentile": 0, "GetBlockBytesScanned_median": 0, "GetBlockBytesScanned_75th_percentile": 0, "GetBlockBytesScanned_90th_percentile": 0, "GetBlockBytesScanned_95th_percentile": 0, "GetBlockBytesScanned_98th_percentile": 0, "GetBlockBytesScanned_99th_percentile": 0, "GetBlockBytesScanned_99.9th_percentile": 0, "majorCompactedInputBytes": 14595, "slowAppendCount": 0, "flushedOutputBytes": 9812, "Replay_num_ops": 0, "Replay_min": 0, "Replay_max": 0, "Replay_mean": 0, "Replay_25th_percentile": 0, "Replay_median": 0, "Replay_75th_percentile": 0, "Replay_90th_percentile": 0, "Replay_95th_percentile": 0, "Replay_98th_percentile": 0, "Replay_99th_percentile": 0, "Replay_99.9th_percentile": 0, "MajorCompactionInputSize_num_ops": 2, "MajorCompactionInputSize_min": 4906, "MajorCompactionInputSize_max": 9689, "MajorCompactionInputSize_mean": 7297, "MajorCompactionInputSize_25th_percentile": 6101, "MajorCompactionInputSize_median": 7297, "MajorCompactionInputSize_75th_percentile": 8493, "MajorCompactionInputSize_90th_percentile": 9210, "MajorCompactionInputSize_95th_percentile": 9449, "MajorCompactionInputSize_98th_percentile": 9593, "MajorCompactionInputSize_99th_percentile": 9641, "MajorCompactionInputSize_99.9th_percentile": 9684, "MajorCompactionInputSize_SizeRangeCount_100-1000": 2, "pauseInfoThresholdExceeded": 0, "CheckAndDelete_num_ops": 0, "CheckAndDelete_min": 0, "CheckAndDelete_max": 0, "CheckAndDelete_mean": 0, "CheckAndDelete_25th_percentile": 0, "CheckAndDelete_median": 0, "CheckAndDelete_75th_percentile": 0, "CheckAndDelete_90th_percentile": 0, "CheckAndDelete_95th_percentile": 0, "CheckAndDelete_98th_percentile": 0, "CheckAndDelete_99th_percentile": 0, "CheckAndDelete_99.9th_percentile": 0, "CompactionInputSize_num_ops": 2, "CompactionInputSize_min": 4906, "CompactionInputSize_max": 9689, "CompactionInputSize_mean": 7297, "CompactionInputSize_25th_percentile": 6101, "CompactionInputSize_median": 7297, "CompactionInputSize_75th_percentile": 8493, "CompactionInputSize_90th_percentile": 9210, "CompactionInputSize_95th_percentile": 9449, "CompactionInputSize_98th_percentile": 
9593, "CompactionInputSize_99th_percentile": 9641, "CompactionInputSize_99.9th_percentile": 9684, "CompactionInputSize_SizeRangeCount_100-1000": 2, "flushedMemstoreBytes": 50, "majorCompactedOutputBytes": 9218, "slowPutCount": 0, "compactedInputBytes": 14595, "FlushOutputSize_num_ops": 1, "FlushOutputSize_min": 9812, "FlushOutputSize_max": 9812, "FlushOutputSize_mean": 9812, "FlushOutputSize_25th_percentile": 9812, "FlushOutputSize_median": 9812, "FlushOutputSize_75th_percentile": 9812, "FlushOutputSize_90th_percentile": 9812, "FlushOutputSize_95th_percentile": 9812, "FlushOutputSize_98th_percentile": 9812, "FlushOutputSize_99th_percentile": 9812, "FlushOutputSize_99.9th_percentile": 9812, "FlushOutputSize_SizeRangeCount_100-1000": 1, "PauseTimeWithGc_num_ops": 0, "PauseTimeWithGc_min": 0, "PauseTimeWithGc_max": 0, "PauseTimeWithGc_mean": 0, "PauseTimeWithGc_25th_percentile": 0, "PauseTimeWithGc_median": 0, "PauseTimeWithGc_75th_percentile": 0, "PauseTimeWithGc_90th_percentile": 0, "PauseTimeWithGc_95th_percentile": 0, "PauseTimeWithGc_98th_percentile": 0, "PauseTimeWithGc_99th_percentile": 0, "PauseTimeWithGc_99.9th_percentile": 0, "compactedOutputBytes": 9218, "pauseWarnThresholdExceeded": 0, "ScanBlockBytesScanned_num_ops": 1, "ScanBlockBytesScanned_min": 32, "ScanBlockBytesScanned_max": 32, "ScanBlockBytesScanned_mean": 32, "ScanBlockBytesScanned_25th_percentile": 32, "ScanBlockBytesScanned_median": 32, "ScanBlockBytesScanned_75th_percentile": 32, "ScanBlockBytesScanned_90th_percentile": 32, "ScanBlockBytesScanned_95th_percentile": 32, "ScanBlockBytesScanned_98th_percentile": 32, "ScanBlockBytesScanned_99th_percentile": 32, "ScanBlockBytesScanned_99.9th_percentile": 32, "ScanBlockBytesScanned_SizeRangeCount_10-100": 1, "Increment_num_ops": 0, "Increment_min": 0, "Increment_max": 0, "Increment_mean": 0, "Increment_25th_percentile": 0, "Increment_median": 0, "Increment_75th_percentile": 0, "Increment_90th_percentile": 0, "Increment_95th_percentile": 0, "Increment_98th_percentile": 0, "Increment_99th_percentile": 0, "Increment_99.9th_percentile": 0, "Delete_num_ops": 1, "Delete_min": 4, "Delete_max": 4, "Delete_mean": 4, "Delete_25th_percentile": 4, "Delete_median": 4, "Delete_75th_percentile": 4, "Delete_90th_percentile": 4, "Delete_95th_percentile": 4, "Delete_98th_percentile": 4, "Delete_99th_percentile": 4, "Delete_99.9th_percentile": 4, "Delete_TimeRangeCount_3-10": 1, "DeleteBatch_num_ops": 0, "DeleteBatch_min": 0, "DeleteBatch_max": 0, "DeleteBatch_mean": 0, "DeleteBatch_25th_percentile": 0, "DeleteBatch_median": 0, "DeleteBatch_75th_percentile": 0, "DeleteBatch_90th_percentile": 0, "DeleteBatch_95th_percentile": 0, "DeleteBatch_98th_percentile": 0, "DeleteBatch_99th_percentile": 0, "DeleteBatch_99.9th_percentile": 0, "blockBytesScannedCount": 32, "FlushMemstoreSize_num_ops": 1, "FlushMemstoreSize_min": 50, "FlushMemstoreSize_max": 50, "FlushMemstoreSize_mean": 50, "FlushMemstoreSize_25th_percentile": 50, "FlushMemstoreSize_median": 50, "FlushMemstoreSize_75th_percentile": 50, "FlushMemstoreSize_90th_percentile": 50, "FlushMemstoreSize_95th_percentile": 50, "FlushMemstoreSize_98th_percentile": 50, "FlushMemstoreSize_99th_percentile": 50, "FlushMemstoreSize_99.9th_percentile": 50, "FlushMemstoreSize_SizeRangeCount_10-100": 1, "CompactionInputFileCount_num_ops": 2, "CompactionInputFileCount_min": 1, "CompactionInputFileCount_max": 2, "CompactionInputFileCount_mean": 1, "CompactionInputFileCount_25th_percentile": 1, "CompactionInputFileCount_median": 2, 
"CompactionInputFileCount_75th_percentile": 2, "CompactionInputFileCount_90th_percentile": 2, "CompactionInputFileCount_95th_percentile": 2, "CompactionInputFileCount_98th_percentile": 2, "CompactionInputFileCount_99th_percentile": 2, "CompactionInputFileCount_99.9th_percentile": 2, "CompactionTime_num_ops": 2, "CompactionTime_min": 35, "CompactionTime_max": 41, "CompactionTime_mean": 38, "CompactionTime_25th_percentile": 35, "CompactionTime_median": 36, "CompactionTime_75th_percentile": 41, "CompactionTime_90th_percentile": 41, "CompactionTime_95th_percentile": 41, "CompactionTime_98th_percentile": 41, "CompactionTime_99th_percentile": 41, "CompactionTime_99.9th_percentile": 41, "CompactionTime_TimeRangeCount_30-100": 2, "Get_num_ops": 0, "Get_min": 0, "Get_max": 0, "Get_mean": 0, "Get_25th_percentile": 0, "Get_median": 0, "Get_75th_percentile": 0, "Get_90th_percentile": 0, "Get_95th_percentile": 0, "Get_98th_percentile": 0, "Get_99th_percentile": 0, "Get_99.9th_percentile": 0, "MajorCompactionInputFileCount_num_ops": 2, "MajorCompactionInputFileCount_min": 1, "MajorCompactionInputFileCount_max": 2, "MajorCompactionInputFileCount_mean": 1, "MajorCompactionInputFileCount_25th_percentile": 1, "MajorCompactionInputFileCount_median": 2, "MajorCompactionInputFileCount_75th_percentile": 2, "MajorCompactionInputFileCount_90th_percentile": 2, "MajorCompactionInputFileCount_95th_percentile": 2, "MajorCompactionInputFileCount_98th_percentile": 2, "MajorCompactionInputFileCount_99th_percentile": 2, "MajorCompactionInputFileCount_99.9th_percentile": 2, "scannerLeaseExpiredCount": 0, "CheckAndPut_num_ops": 0, "CheckAndPut_min": 0, "CheckAndPut_max": 0, "CheckAndPut_mean": 0, "CheckAndPut_25th_percentile": 0, "CheckAndPut_median": 0, "CheckAndPut_75th_percentile": 0, "CheckAndPut_90th_percentile": 0, "CheckAndPut_95th_percentile": 0, "CheckAndPut_98th_percentile": 0, "CheckAndPut_99th_percentile": 0, "CheckAndPut_99.9th_percentile": 0, "MajorCompactionOutputSize_num_ops": 2, "MajorCompactionOutputSize_min": 4592, "MajorCompactionOutputSize_max": 4626, "MajorCompactionOutputSize_mean": 4609, "MajorCompactionOutputSize_25th_percentile": 4600, "MajorCompactionOutputSize_median": 4609, "MajorCompactionOutputSize_75th_percentile": 4617, "MajorCompactionOutputSize_90th_percentile": 4622, "MajorCompactionOutputSize_95th_percentile": 4624, "MajorCompactionOutputSize_98th_percentile": 4625, "MajorCompactionOutputSize_99th_percentile": 4625, "MajorCompactionOutputSize_99.9th_percentile": 4625, "MajorCompactionOutputSize_SizeRangeCount_100-1000": 2, "CompactionOutputFileCount_num_ops": 2, "CompactionOutputFileCount_min": 1, "CompactionOutputFileCount_max": 1, "CompactionOutputFileCount_mean": 1, "CompactionOutputFileCount_25th_percentile": 1, "CompactionOutputFileCount_median": 1, "CompactionOutputFileCount_75th_percentile": 1, "CompactionOutputFileCount_90th_percentile": 1, "CompactionOutputFileCount_95th_percentile": 1, "CompactionOutputFileCount_98th_percentile": 1, "CompactionOutputFileCount_99th_percentile": 1, "CompactionOutputFileCount_99.9th_percentile": 1, "slowDeleteCount": 0, "FlushTime_num_ops": 1, "FlushTime_min": 118, "FlushTime_max": 118, "FlushTime_mean": 118, "FlushTime_25th_percentile": 118, "FlushTime_median": 118, "FlushTime_75th_percentile": 118, "FlushTime_90th_percentile": 118, "FlushTime_95th_percentile": 118, "FlushTime_98th_percentile": 118, "FlushTime_99th_percentile": 118, "FlushTime_99.9th_percentile": 118, "FlushTime_TimeRangeCount_100-300": 1, "splitSuccessCount": 0, 
"MajorCompactionOutputFileCount_num_ops": 2, "MajorCompactionOutputFileCount_min": 1, "MajorCompactionOutputFileCount_max": 1, "MajorCompactionOutputFileCount_mean": 1, "MajorCompactionOutputFileCount_25th_percentile": 1, "MajorCompactionOutputFileCount_median": 1, "MajorCompactionOutputFileCount_75th_percentile": 1, "MajorCompactionOutputFileCount_90th_percentile": 1, "MajorCompactionOutputFileCount_95th_percentile": 1, "MajorCompactionOutputFileCount_98th_percentile": 1, "MajorCompactionOutputFileCount_99th_percentile": 1, "MajorCompactionOutputFileCount_99.9th_percentile": 1, "slowGetCount": 0, "ScanSize_num_ops": 1, "ScanSize_min": 0, "ScanSize_max": 0, "ScanSize_mean": 0, "ScanSize_25th_percentile": 0, "ScanSize_median": 0, "ScanSize_75th_percentile": 0, "ScanSize_90th_percentile": 0, "ScanSize_95th_percentile": 0, "ScanSize_98th_percentile": 0, "ScanSize_99th_percentile": 0, "ScanSize_99.9th_percentile": 0, "ScanSize_SizeRangeCount_0-10": 1, "CompactionOutputSize_num_ops": 2, "CompactionOutputSize_min": 4592, "CompactionOutputSize_max": 4626, "CompactionOutputSize_mean": 4609, "CompactionOutputSize_25th_percentile": 4600, "CompactionOutputSize_median": 4609, "CompactionOutputSize_75th_percentile": 4617, "CompactionOutputSize_90th_percentile": 4622, "CompactionOutputSize_95th_percentile": 4624, "CompactionOutputSize_98th_percentile": 4625, "CompactionOutputSize_99th_percentile": 4625, "CompactionOutputSize_99.9th_percentile": 4625, "CompactionOutputSize_SizeRangeCount_100-1000": 2, "PauseTimeWithoutGc_num_ops": 0, "PauseTimeWithoutGc_min": 0, "PauseTimeWithoutGc_max": 0, "PauseTimeWithoutGc_mean": 0, "PauseTimeWithoutGc_25th_percentile": 0, "PauseTimeWithoutGc_median": 0, "PauseTimeWithoutGc_75th_percentile": 0, "PauseTimeWithoutGc_90th_percentile": 0, "PauseTimeWithoutGc_95th_percentile": 0, "PauseTimeWithoutGc_98th_percentile": 0, "PauseTimeWithoutGc_99th_percentile": 0, "PauseTimeWithoutGc_99.9th_percentile": 0, "slowIncrementCount": 0, "Append_num_ops": 0, "Append_min": 0, "Append_max": 0, "Append_mean": 0, "Append_25th_percentile": 0, "Append_median": 0, "Append_75th_percentile": 0, "Append_90th_percentile": 0, "Append_95th_percentile": 0, "Append_98th_percentile": 0, "Append_99th_percentile": 0, "Append_99.9th_percentile": 0, "Bulkload_count": 0, "Bulkload_mean_rate": 0.0, "Bulkload_1min_rate": 0.0, "Bulkload_5min_rate": 0.0, "Bulkload_15min_rate": 0.0, "Bulkload_num_ops": 0, "Bulkload_min": 0, "Bulkload_max": 0, "Bulkload_mean": 0, "Bulkload_25th_percentile": 0, "Bulkload_median": 0, "Bulkload_75th_percentile": 0, "Bulkload_90th_percentile": 0, "Bulkload_95th_percentile": 0, "Bulkload_98th_percentile": 0, "Bulkload_99th_percentile": 0, "Bulkload_99.9th_percentile": 0 } ] } 2024-11-13T10:26:01,123 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45401 {}] master.MasterRpcServices(700): 770665a7984d,44657,1731493541444 reported a fatal error: ***** ABORTING region server 770665a7984d,44657,1731493541444: testing ***** 2024-11-13T10:26:01,126 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '770665a7984d,44657,1731493541444' ***** 2024-11-13T10:26:01,126 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: testing 2024-11-13T10:26:01,127 INFO [RS:0;770665a7984d:44657 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T10:26:01,127 INFO [RS:0;770665a7984d:44657 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager abruptly. 
2024-11-13T10:26:01,127 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T10:26:01,127 INFO [RS:0;770665a7984d:44657 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager abruptly. 2024-11-13T10:26:01,127 INFO [RS:0;770665a7984d:44657 {}] regionserver.HRegionServer(3091): Received CLOSE for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:01,128 INFO [RS:0;770665a7984d:44657 {}] regionserver.HRegionServer(956): aborting server 770665a7984d,44657,1731493541444 2024-11-13T10:26:01,128 INFO [RS:0;770665a7984d:44657 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T10:26:01,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36821 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Get size: 140 connection: 172.17.0.2:54916 deadline: 1731493621128, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=770665a7984d port=44657 startCode=1731493541444. As of locationSeqNum=12. 2024-11-13T10:26:01,128 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 70a598aa9b18017afa50633b8eb231df, disabling compactions & flushes 2024-11-13T10:26:01,128 INFO [RS:0;770665a7984d:44657 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;770665a7984d:44657. 2024-11-13T10:26:01,128 INFO [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:01,128 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 
2024-11-13T10:26:01,128 DEBUG [RS:0;770665a7984d:44657 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T10:26:01,128 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. after waiting 0 ms 2024-11-13T10:26:01,129 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:01,129 DEBUG [RS:0;770665a7984d:44657 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T10:26:01,129 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,36821,1731493541562, seqNum=5 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,36821,1731493541562, seqNum=5, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=770665a7984d port=44657 startCode=1731493541444. As of locationSeqNum=12. 2024-11-13T10:26:01,129 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,36821,1731493541562, seqNum=5 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=770665a7984d port=44657 startCode=1731493541444. As of locationSeqNum=12. 2024-11-13T10:26:01,129 INFO [RS:0;770665a7984d:44657 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T10:26:01,129 INFO [RS:0;770665a7984d:44657 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T10:26:01,129 INFO [RS:0;770665a7984d:44657 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
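The "***** ABORTING region server ...: testing *****" banner, the JSON metrics dump, and the "STOPPED: testing" line above all stem from the test deliberately aborting the server that now hosts the region. A minimal sketch of that step, assuming a reference to the hosting HRegionServer obtained from the test's mini cluster (the lookup itself is not shown, and the reason/cause strings are illustrative):

```java
import org.apache.hadoop.hbase.regionserver.HRegionServer;

public class AbortRegionServerSketch {
  // 'rs' is assumed to be the server at 770665a7984d,44657 that was just assigned
  // region 70a598aa9b18017afa50633b8eb231df.
  static void abortForTest(HRegionServer rs) {
    // Abortable.abort(why, cause): the reason string is what appears after
    // "ABORTING region server ...:" and "STOPPED:" in the log above.
    rs.abort("testing", new Exception("induced failure for the WAL replay test"));
  }
}
```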
2024-11-13T10:26:01,129 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,36821,1731493541562, seqNum=5 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,44657,1731493541444, seqNum=12 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=770665a7984d port=44657 startCode=1731493541444. As of locationSeqNum=12. 2024-11-13T10:26:01,129 INFO [RS:0;770665a7984d:44657 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T10:26:01,131 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 770665a7984d,44657,1731493541444 aborting at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processRequest(ServerRpcConnection.java:564) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processOneRpc(ServerRpcConnection.java:364) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyServerRpcConnection.process(NettyServerRpcConnection.java:89) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:56) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:31) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:99) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:26:01,132 INFO [RS:0;770665a7984d:44657 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-13T10:26:01,132 DEBUG [RS:0;770665a7984d:44657 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 70a598aa9b18017afa50633b8eb231df=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.} 2024-11-13T10:26:01,132 DEBUG [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T10:26:01,132 INFO [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T10:26:01,132 DEBUG [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T10:26:01,132 DEBUG [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T10:26:01,132 DEBUG [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T10:26:01,132 DEBUG [RS:0;770665a7984d:44657 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:01,133 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1, error=org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 770665a7984d,44657,1731493541444 aborting 2024-11-13T10:26:01,133 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1 is org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 770665a7984d,44657,1731493541444 aborting 2024-11-13T10:26:01,133 
DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(88): Try removing region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1 from cache 2024-11-13T10:26:01,137 INFO [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:01,137 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 70a598aa9b18017afa50633b8eb231df: Waiting for close lock at 1731493561128Running coprocessor pre-close hooks at 1731493561128Disabling compacts and flushes for region at 1731493561128Disabling writes for close at 1731493561128Writing region close event to WAL at 1731493561137 (+9 ms)Running coprocessor post-close hooks at 1731493561137Closed at 1731493561137 2024-11-13T10:26:01,138 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:01,138 ERROR [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1960): Memstore data size is 5811 in region hbase:meta,,1.1588230740 2024-11-13T10:26:01,138 DEBUG [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T10:26:01,138 INFO [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T10:26:01,138 DEBUG [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731493561132Running coprocessor pre-close hooks at 1731493561132Disabling compacts and flushes for region at 1731493561132Disabling writes for close at 1731493561132Writing region close event to WAL at 1731493561138 (+6 ms)Running coprocessor post-close hooks at 1731493561138Closed at 1731493561138 2024-11-13T10:26:01,138 DEBUG [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T10:26:01,147 INFO [regionserver/770665a7984d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T10:26:01,239 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T10:26:01,241 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1] 2024-11-13T10:26:01,241 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 770665a7984d,44657,1731493541444 aborting at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processRequest(ServerRpcConnection.java:564) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processOneRpc(ServerRpcConnection.java:364) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyServerRpcConnection.process(NettyServerRpcConnection.java:89) 
~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:56) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:31) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:99) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T10:26:01,242 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1, error=org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 770665a7984d,44657,1731493541444 aborting 2024-11-13T10:26:01,242 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1 is org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 770665a7984d,44657,1731493541444 aborting 2024-11-13T10:26:01,243 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(88): Try removing region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1 from cache 2024-11-13T10:26:01,306 INFO [regionserver/770665a7984d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T10:26:01,306 INFO [regionserver/770665a7984d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T10:26:01,333 INFO [RS:0;770665a7984d:44657 {}] regionserver.HRegionServer(976): stopping server 770665a7984d,44657,1731493541444; all regions closed. 2024-11-13T10:26:01,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741836_1012 (size=3561) 2024-11-13T10:26:01,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741836_1012 (size=3561) 2024-11-13T10:26:01,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741833_1009 (size=1407) 2024-11-13T10:26:01,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741833_1009 (size=1407) 2024-11-13T10:26:01,341 DEBUG [RS:0;770665a7984d:44657 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T10:26:01,341 INFO [RS:0;770665a7984d:44657 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T10:26:01,341 INFO [RS:0;770665a7984d:44657 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T10:26:01,341 INFO [RS:0;770665a7984d:44657 {}] hbase.ChoreService(370): Chore service for: regionserver/770665a7984d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T10:26:01,342 INFO [RS:0;770665a7984d:44657 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T10:26:01,342 INFO [regionserver/770665a7984d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
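
The repeated "Try updating ... / Try removing ... from cache" entries show the client reconciling its cached region location against errors: a RegionMovedException carries a newer location (seqNum=12 replacing seqNum=5), while an aborted or unreachable server simply evicts the entry so the next call re-fetches from the registry. A minimal, hypothetical sketch of that cache behaviour using only the JDK follows; the Location record and method names are assumptions, not the AsyncRegionLocatorHelper API.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Hypothetical client-side region location cache: updated on "region moved"
// errors, cleared on connectivity/abort errors, as in the entries above.
public class LocationCacheSketch {
  record Location(String host, int port, long seqNum) {}

  private final ConcurrentMap<String, Location> cache = new ConcurrentHashMap<>();

  // A "region moved" style error: replace the cached entry only if the
  // reported location is strictly newer than what we already have.
  void onRegionMoved(String region, Location reported) {
    cache.merge(region, reported,
        (old, neu) -> neu.seqNum() > old.seqNum() ? neu : old);
  }

  // A connectivity / server-aborted style error: the old location is useless,
  // so drop it and let the next call re-fetch from the registry (meta/ZK).
  void onServerUnreachable(String region, Location failed) {
    cache.remove(region, failed);   // only removes if the stale entry is still cached
  }

  Location get(String region) { return cache.get(region); }

  public static void main(String[] args) {
    LocationCacheSketch locator = new LocationCacheSketch();
    String region = "testReplayEditsAfterRegionMovedWithMultiCF";
    locator.onRegionMoved(region, new Location("rs-a", 36821, 5));
    // server replies with RegionMovedException-style info: new host/port, seqNum 12
    locator.onRegionMoved(region, new Location("rs-b", 44657, 12));
    System.out.println(locator.get(region));   // Location[host=rs-b, port=44657, seqNum=12]
    locator.onServerUnreachable(region, locator.get(region));
    System.out.println(locator.get(region));   // null -> re-fetch from registry
  }
}
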
2024-11-13T10:26:01,342 INFO [RS:0;770665a7984d:44657 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44657 2024-11-13T10:26:01,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44657-0x10110dc99880001, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/770665a7984d,44657,1731493541444 2024-11-13T10:26:01,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T10:26:01,350 INFO [RS:0;770665a7984d:44657 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T10:26:01,351 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [770665a7984d,44657,1731493541444] 2024-11-13T10:26:01,353 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/770665a7984d,44657,1731493541444 already deleted, retry=false 2024-11-13T10:26:01,353 INFO [RegionServerTracker-0 {}] master.ServerManager(695): Processing expiration of 770665a7984d,44657,1731493541444 on 770665a7984d,45401,1731493540547 2024-11-13T10:26:01,360 DEBUG [RegionServerTracker-0 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=false; ServerCrashProcedure 770665a7984d,44657,1731493541444, splitWal=true, meta=true 2024-11-13T10:26:01,363 INFO [PEWorker-5 {}] procedure.ServerCrashProcedure(169): Start pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=true; ServerCrashProcedure 770665a7984d,44657,1731493541444, splitWal=true, meta=true 2024-11-13T10:26:01,363 INFO [RegionServerTracker-0 {}] assignment.AssignmentManager(1991): Scheduled ServerCrashProcedure pid=13 for 770665a7984d,44657,1731493541444 (carryingMeta=true) 770665a7984d,44657,1731493541444/CRASHED/regionCount=2/lock=java.util.concurrent.locks.ReentrantReadWriteLock@29ae7886[Write locks = 1, Read locks = 0], oldState=ONLINE. 
2024-11-13T10:26:01,364 INFO [PEWorker-5 {}] procedure.ServerCrashProcedure(339): Splitting WALs pid=13, state=RUNNABLE:SERVER_CRASH_SPLIT_META_LOGS, hasLock=true; ServerCrashProcedure 770665a7984d,44657,1731493541444, splitWal=true, meta=true, isMeta: true 2024-11-13T10:26:01,366 DEBUG [PEWorker-5 {}] master.MasterWalManager(329): Renamed region directory: hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting 2024-11-13T10:26:01,367 INFO [PEWorker-5 {}] master.SplitWALManager(105): 770665a7984d,44657,1731493541444 WAL count=1, meta=true 2024-11-13T10:26:01,370 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE:ACQUIRE_SPLIT_WAL_WORKER, hasLock=false; SplitWALProcedure 770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta}] 2024-11-13T10:26:01,375 DEBUG [PEWorker-3 {}] master.SplitWALManager(158): Acquired split WAL worker=770665a7984d,46143,1731493541627 2024-11-13T10:26:01,377 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE, hasLock=false; SplitWALRemoteProcedure 770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta, worker=770665a7984d,46143,1731493541627}] 2024-11-13T10:26:01,449 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T10:26:01,451 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1] 2024-11-13T10:26:01,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44657-0x10110dc99880001, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T10:26:01,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44657-0x10110dc99880001, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T10:26:01,453 INFO [RS:0;770665a7984d:44657 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T10:26:01,453 WARN [RPCClient-NioEventLoopGroup-6-3 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server 770665a7984d:44657 org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: 770665a7984d/172.17.0.2:44657 Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel.doFinishConnect(NioSocketChannel.java:336) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.finishConnect(AbstractNioChannel.java:339) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:776) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:26:01,453 INFO [RS:0;770665a7984d:44657 {}] regionserver.HRegionServer(1031): Exiting; stopping=770665a7984d,44657,1731493541444; zookeeper connection closed. 2024-11-13T10:26:01,454 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@28e9c05b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@28e9c05b 2024-11-13T10:26:01,454 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1, error=java.net.ConnectException: Call to address=770665a7984d:44657 failed on connection exception: org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: 770665a7984d/172.17.0.2:44657 2024-11-13T10:26:01,454 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1 is java.net.ConnectException: Connection refused 2024-11-13T10:26:01,454 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(88): Try removing region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1 from cache 2024-11-13T10:26:01,454 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.FailedServers(52): Added failed server with address 770665a7984d:44657 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: 770665a7984d/172.17.0.2:44657 2024-11-13T10:26:01,540 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T10:26:01,541 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57203, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T10:26:01,543 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46143 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SplitWALCallable, pid=15 2024-11-13T10:26:01,562 INFO [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(299): Splitting hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta, size=3.5 K (3561bytes) 2024-11-13T10:26:01,562 INFO [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta 2024-11-13T10:26:01,563 INFO [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta after 1ms 2024-11-13T10:26:01,566 DEBUG [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:26:01,566 INFO [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(310): Open hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta took 4ms 2024-11-13T10:26:01,576 DEBUG [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(352): Last flushed sequenceid for 1588230740: last_flushed_sequence_id: 18446744073709551615 store_sequence_id { family_name: "info" sequence_id: 5 } store_sequence_id { family_name: "ns" sequence_id: 3 } store_sequence_id { family_name: "rep_barrier" sequence_id: 18446744073709551615 } store_sequence_id { family_name: "table" sequence_id: 6 } 2024-11-13T10:26:01,577 DEBUG [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta so closing down 2024-11-13T10:26:01,577 DEBUG [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-13T10:26:01,578 INFO [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-0-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000004-770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta.temp 2024-11-13T10:26:01,580 INFO [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-0-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta.temp 2024-11-13T10:26:01,580 INFO [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(145): 3 split writer threads finished 2024-11-13T10:26:01,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741892_1070 (size=3346) 2024-11-13T10:26:01,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741892_1070 
(size=3346) 2024-11-13T10:26:01,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741892_1070 (size=3346) 2024-11-13T10:26:01,590 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta.temp (wrote 15 edits, skipped 0 edits in 0 ms) 2024-11-13T10:26:01,591 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta.temp to hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/recovered.edits/0000000000000000018 2024-11-13T10:26:01,591 INFO [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(425): Processed 16 edits across 1 Regions in 23 ms; skipped=1; WAL=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta, size=3.5 K, length=3561, corrupted=false, cancelled=false 2024-11-13T10:26:01,591 DEBUG [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(428): Completed split of hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta, journal: Splitting hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta, size=3.5 K (3561bytes) at 1731493561562Finishing writing output for hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta so closing down at 1731493561577 (+15 ms)Creating recovered edits writer path=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta.temp at 1731493561580 (+3 ms)3 split writer threads finished at 1731493561581 (+1 ms)Closed recovered edits writer path=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta.temp (wrote 15 edits, skipped 0 edits in 0 ms) at 1731493561590 (+9 ms)Rename recovered edits hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta.temp to hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/recovered.edits/0000000000000000018 at 1731493561591 (+1 ms)Processed 16 edits across 1 Regions in 23 ms; skipped=1; 
WAL=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta, size=3.5 K, length=3561, corrupted=false, cancelled=false at 1731493561591 2024-11-13T10:26:01,592 DEBUG [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.SplitLogWorker(218): Done splitting WAL hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta 2024-11-13T10:26:01,593 DEBUG [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-13T10:26:01,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] master.HMaster(4169): Remote procedure done, pid=15 2024-11-13T10:26:01,600 INFO [PEWorker-4 {}] wal.WALSplitUtil(143): Moved hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta to hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/oldWALs 2024-11-13T10:26:01,603 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=15, resume processing ppid=14 2024-11-13T10:26:01,603 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, ppid=14, state=SUCCESS, hasLock=false; SplitWALRemoteProcedure 770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta, worker=770665a7984d,46143,1731493541627 in 224 msec 2024-11-13T10:26:01,605 DEBUG [PEWorker-1 {}] master.SplitWALManager(172): Release split WAL worker=770665a7984d,46143,1731493541627 2024-11-13T10:26:01,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-13T10:26:01,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; SplitWALProcedure 770665a7984d%2C44657%2C1731493541444.meta.1731493543959.meta, worker=770665a7984d,46143,1731493541627 in 236 msec 2024-11-13T10:26:01,610 INFO [PEWorker-5 {}] master.SplitLogManager(171): hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting dir is empty, no logs to split. 2024-11-13T10:26:01,610 INFO [PEWorker-5 {}] master.SplitWALManager(105): 770665a7984d,44657,1731493541444 WAL count=0, meta=true 2024-11-13T10:26:01,610 DEBUG [PEWorker-5 {}] procedure.ServerCrashProcedure(329): Check if 770665a7984d,44657,1731493541444 WAL splitting is done? 
wals=0, meta=true 2024-11-13T10:26:01,612 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T10:26:01,613 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T10:26:01,614 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OPEN, location=null; forceNewPlan=true, retain=false 2024-11-13T10:26:01,759 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T10:26:01,761 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1] 2024-11-13T10:26:01,762 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.AbstractRpcClient(357): Not trying to connect to 770665a7984d:44657 this server is in the failed servers list 2024-11-13T10:26:01,762 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1, error=org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=770665a7984d:44657 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 770665a7984d:44657 2024-11-13T10:26:01,762 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1 is org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 770665a7984d:44657 2024-11-13T10:26:01,762 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncRegionLocatorHelper(88): Try removing region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1 from cache 2024-11-13T10:26:01,764 DEBUG [770665a7984d:45401 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=2, allServersCount=2 2024-11-13T10:26:01,765 DEBUG [770665a7984d:45401 {}] balancer.BalancerClusterState(204): Hosts are {770665a7984d=0} racks are {/default-rack=0} 2024-11-13T10:26:01,765 DEBUG [770665a7984d:45401 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T10:26:01,765 DEBUG [770665a7984d:45401 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T10:26:01,765 DEBUG [770665a7984d:45401 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T10:26:01,765 DEBUG [770665a7984d:45401 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T10:26:01,765 INFO [770665a7984d:45401 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T10:26:01,765 INFO [770665a7984d:45401 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T10:26:01,765 DEBUG [770665a7984d:45401 {}] 
balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T10:26:01,765 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=770665a7984d,46143,1731493541627 2024-11-13T10:26:01,767 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 770665a7984d,46143,1731493541627, state=OPENING 2024-11-13T10:26:01,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T10:26:01,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T10:26:01,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T10:26:01,770 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T10:26:01,770 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T10:26:01,770 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T10:26:01,770 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T10:26:01,770 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=770665a7984d,46143,1731493541627}] 2024-11-13T10:26:01,928 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T10:26:01,928 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:26:01,929 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-13T10:26:01,931 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=770665a7984d%2C46143%2C1731493541627.meta, suffix=.meta, logDir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,46143,1731493541627, archiveDir=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/oldWALs, maxLogs=32 2024-11-13T10:26:01,945 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,46143,1731493541627/770665a7984d%2C46143%2C1731493541627.meta.1731493561931.meta, exclude list is [], retry=0 2024-11-13T10:26:01,948 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:26:01,948 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:26:01,948 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:26:01,951 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,46143,1731493541627/770665a7984d%2C46143%2C1731493541627.meta.1731493561931.meta 2024-11-13T10:26:01,951 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:42913:42913)] 2024-11-13T10:26:01,951 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:26:01,952 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T10:26:01,952 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T10:26:01,952 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
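
The split recorded a few entries above (WALs/...-splitting -> recovered.edits/0000000000000000018, "Processed 16 edits ... skipped=1") boils down to: keep only edits newer than the region's last flushed sequence id, write them to a temp file, and publish the file under the highest sequence id it contains so the reopening region can find it. A rough sketch under those assumptions follows, with an invented record format, a single per-region flush id instead of per-family ids, and plain java.nio.file standing in for HDFS; it is not the WALSplitter API.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.List;

// Hypothetical WAL-split step: filter, write to .temp, then rename so the
// recovered-edits file only ever appears complete.
public class WalSplitSketch {
  record Edit(String region, long seqId, String payload) {}

  static Path splitForRegion(List<Edit> walEdits, String region, long lastFlushedSeqId,
                             Path regionDir) throws IOException {
    List<Edit> kept = new ArrayList<>();
    for (Edit e : walEdits) {
      if (e.region().equals(region) && e.seqId() > lastFlushedSeqId) {
        kept.add(e);            // edits at or below lastFlushedSeqId are already durable -> skipped
      }
    }
    if (kept.isEmpty()) {
      return null;              // nothing to recover for this region
    }
    Path editsDir = Files.createDirectories(regionDir.resolve("recovered.edits"));
    long firstSeq = kept.get(0).seqId();                 // WAL edits arrive in sequence order
    long maxSeq = kept.get(kept.size() - 1).seqId();
    Path temp = editsDir.resolve(String.format("%019d", firstSeq) + ".temp");
    List<String> lines = new ArrayList<>();
    for (Edit e : kept) {
      lines.add(e.seqId() + "\t" + e.payload());
    }
    Files.write(temp, lines);                            // write everything, then publish atomically
    Path fin = editsDir.resolve(String.format("%019d", maxSeq));
    return Files.move(temp, fin, StandardCopyOption.ATOMIC_MOVE);
  }

  public static void main(String[] args) throws IOException {
    List<Edit> wal = List.of(
        new Edit("1588230740", 3, "put ns:d"),           // at/below last flushed -> skipped
        new Edit("1588230740", 4, "put info:regioninfo"),
        new Edit("1588230740", 18, "put table:state"));
    Path out = splitForRegion(wal, "1588230740", 3, Files.createTempDirectory("meta-region"));
    System.out.println("recovered edits file: " + out);  // ...recovered.edits/0000000000000000018
  }
}
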
2024-11-13T10:26:01,952 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T10:26:01,952 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:01,952 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T10:26:01,952 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T10:26:01,959 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T10:26:01,960 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T10:26:01,960 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:01,961 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T10:26:01,961 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T10:26:01,962 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T10:26:01,962 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:01,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T10:26:01,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T10:26:01,963 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T10:26:01,963 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:01,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T10:26:01,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T10:26:01,964 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T10:26:01,964 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:01,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-13T10:26:01,965 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T10:26:01,966 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740 2024-11-13T10:26:01,967 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740 2024-11-13T10:26:01,968 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/recovered.edits/0000000000000000018 2024-11-13T10:26:01,970 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/recovered.edits/0000000000000000018: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:26:01,973 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5793): Applied 40, skipped 0, firstSequenceIdInLog=4, maxSequenceIdInLog=18, path=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/recovered.edits/0000000000000000018 2024-11-13T10:26:01,973 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.67 KB heapSize=9.66 KB 2024-11-13T10:26:01,994 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/.tmp/info/75d61de3e87240d1a5217640cd203ffa is 205, key is testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df./info:regioninfo/1731493561033/Put/seqid=0 2024-11-13T10:26:02,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741894_1072 (size=11177) 2024-11-13T10:26:02,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741894_1072 (size=11177) 2024-11-13T10:26:02,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741894_1072 (size=11177) 2024-11-13T10:26:02,003 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.46 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/.tmp/info/75d61de3e87240d1a5217640cd203ffa 2024-11-13T10:26:02,024 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/.tmp/ns/0a25d5e70c4542cbacd50b7d0336bac8 is 43, key is default/ns:d/1731493544168/Put/seqid=0 2024-11-13T10:26:02,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741895_1073 (size=5153) 2024-11-13T10:26:02,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741895_1073 (size=5153) 2024-11-13T10:26:02,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741895_1073 (size=5153) 2024-11-13T10:26:02,033 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/.tmp/ns/0a25d5e70c4542cbacd50b7d0336bac8 2024-11-13T10:26:02,062 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/.tmp/table/19671fc1874f4bdd9c5adc67cbba672f is 78, key is testReplayEditsAfterRegionMovedWithMultiCF/table:state/1731493559255/Put/seqid=0 2024-11-13T10:26:02,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741896_1074 (size=5431) 2024-11-13T10:26:02,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741896_1074 (size=5431) 2024-11-13T10:26:02,071 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=148 B at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/.tmp/table/19671fc1874f4bdd9c5adc67cbba672f 2024-11-13T10:26:02,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741896_1074 (size=5431) 2024-11-13T10:26:02,078 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/.tmp/info/75d61de3e87240d1a5217640cd203ffa as hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/info/75d61de3e87240d1a5217640cd203ffa 2024-11-13T10:26:02,085 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/info/75d61de3e87240d1a5217640cd203ffa, entries=36, sequenceid=18, filesize=10.9 K 2024-11-13T10:26:02,086 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/.tmp/ns/0a25d5e70c4542cbacd50b7d0336bac8 as hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/ns/0a25d5e70c4542cbacd50b7d0336bac8 2024-11-13T10:26:02,093 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/ns/0a25d5e70c4542cbacd50b7d0336bac8, entries=2, sequenceid=18, filesize=5.0 K 2024-11-13T10:26:02,094 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/.tmp/table/19671fc1874f4bdd9c5adc67cbba672f as hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/table/19671fc1874f4bdd9c5adc67cbba672f 2024-11-13T10:26:02,100 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/table/19671fc1874f4bdd9c5adc67cbba672f, entries=2, sequenceid=18, filesize=5.3 K 2024-11-13T10:26:02,101 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~5.67 KB/5811, heapSize ~9.37 KB/9592, currentSize=0 B/0 for 1588230740 in 128ms, sequenceid=18, compaction requested=false; wal=null 2024-11-13T10:26:02,101 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T10:26:02,102 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/recovered.edits/0000000000000000018 2024-11-13T10:26:02,104 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T10:26:02,104 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T10:26:02,105 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
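The entries above show the meta region replaying its recovered.edits file (40 edits applied, maxSequenceIdInLog=18), flushing the memstore into per-family HFiles under the region's .tmp directory, committing each file into its family directory, and deleting the recovered.edits file afterwards. Below is a minimal Java sketch of that replay-then-commit shape; only the Hadoop FileSystem calls are real APIs, while replayEditsFile() is a hypothetical stub for the WAL decoding and memstore work that HRegion performs internally.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RecoveredEditsReplaySketch {

  // Replays any recovered.edits files under a region directory, then commits the
  // flushed store files from <region>/.tmp/<family>/ into <region>/<family>/.
  static void replayAndCommit(Configuration conf, Path regionDir, long maxFlushedSeqId)
      throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path editsDir = new Path(regionDir, "recovered.edits");
    if (fs.exists(editsDir)) {
      for (FileStatus edits : fs.listStatus(editsDir)) {
        // Edits at or below maxFlushedSeqId are already durable and get skipped;
        // the rest are re-applied to the memstore (hypothetical stub below).
        long applied = replayEditsFile(fs, edits.getPath(), maxFlushedSeqId);
        if (applied > 0) {
          commitTmpStoreFiles(fs, regionDir); // after the memstore flush wrote .tmp files
        }
        fs.delete(edits.getPath(), false); // "Deleted recovered.edits file=..."
      }
    }
  }

  // Commit step only: rename every temporary store file into its family directory,
  // mirroring the "Committing .../.tmp/info/... as .../info/..." lines in the log.
  static void commitTmpStoreFiles(FileSystem fs, Path regionDir) throws IOException {
    Path tmpDir = new Path(regionDir, ".tmp");
    if (!fs.exists(tmpDir)) {
      return;
    }
    for (FileStatus family : fs.listStatus(tmpDir)) {
      for (FileStatus storeFile : fs.listStatus(family.getPath())) {
        Path dst = new Path(new Path(regionDir, family.getPath().getName()),
            storeFile.getPath().getName());
        fs.mkdirs(dst.getParent());
        fs.rename(storeFile.getPath(), dst);
      }
    }
  }

  // Hypothetical stand-in, not a real HBase method: decode the edits file and apply
  // everything newer than maxFlushedSeqId to the memstore, returning the count applied.
  static long replayEditsFile(FileSystem fs, Path editsFile, long maxFlushedSeqId) {
    return 0L;
  }
}

In the log the flush and commit happen once for all stores after the whole file is replayed; the per-file loop here only keeps the sketch short.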
2024-11-13T10:26:02,106 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T10:26:02,109 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/recovered.edits/18.seqid, newMaxSeqId=18, maxSeqId=1 2024-11-13T10:26:02,110 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=19; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74057557, jitterRate=0.10354359447956085}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T10:26:02,110 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T10:26:02,111 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731493561953Writing region info on filesystem at 1731493561953Initializing all the Stores at 1731493561953Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731493561953Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731493561959 (+6 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493561959Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731493561959Obtaining lock to block concurrent updates at 1731493561973 (+14 ms)Preparing flush snapshotting stores in 1588230740 at 1731493561973Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=5811, getHeapSize=9832, getOffHeapSize=0, getCellsCount=40 at 1731493561973Flushing stores of hbase:meta,,1.1588230740 at 1731493561973Flushing 1588230740/info: creating writer at 1731493561974 (+1 ms)Flushing 1588230740/info: appending metadata at 1731493561993 (+19 ms)Flushing 1588230740/info: closing flushed file at 1731493561993Flushing 1588230740/ns: creating writer at 
1731493562008 (+15 ms)Flushing 1588230740/ns: appending metadata at 1731493562023 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731493562023Flushing 1588230740/table: creating writer at 1731493562039 (+16 ms)Flushing 1588230740/table: appending metadata at 1731493562061 (+22 ms)Flushing 1588230740/table: closing flushed file at 1731493562061Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c83cdc8: reopening flushed file at 1731493562077 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1233b225: reopening flushed file at 1731493562085 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c7ca3c9: reopening flushed file at 1731493562093 (+8 ms)Finished flush of dataSize ~5.67 KB/5811, heapSize ~9.37 KB/9592, currentSize=0 B/0 for 1588230740 in 128ms, sequenceid=18, compaction requested=false; wal=null at 1731493562101 (+8 ms)Cleaning up temporary data from old regions at 1731493562104 (+3 ms)Running coprocessor post-open hooks at 1731493562110 (+6 ms)Region opened successfully at 1731493562110 2024-11-13T10:26:02,113 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=17, masterSystemTime=1731493561923 2024-11-13T10:26:02,116 DEBUG [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T10:26:02,116 INFO [RS_OPEN_META-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_META, pid=17}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T10:26:02,116 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=19, regionLocation=770665a7984d,46143,1731493541627 2024-11-13T10:26:02,118 INFO [PEWorker-1 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 770665a7984d,46143,1731493541627, state=OPEN 2024-11-13T10:26:02,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T10:26:02,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T10:26:02,120 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T10:26:02,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T10:26:02,120 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T10:26:02,120 DEBUG [PEWorker-1 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=17, ppid=16, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=770665a7984d,46143,1731493541627 2024-11-13T10:26:02,120 DEBUG [zk-event-processor-pool-0 
{}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T10:26:02,127 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=16 2024-11-13T10:26:02,127 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=16, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=770665a7984d,46143,1731493541627 in 350 msec 2024-11-13T10:26:02,130 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=13 2024-11-13T10:26:02,130 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=13, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 515 msec 2024-11-13T10:26:02,130 INFO [PEWorker-3 {}] procedure.ServerCrashProcedure(207): 770665a7984d,44657,1731493541444 had 2 regions 2024-11-13T10:26:02,131 INFO [PEWorker-3 {}] procedure.ServerCrashProcedure(339): Splitting WALs pid=13, state=RUNNABLE:SERVER_CRASH_SPLIT_LOGS, hasLock=true; ServerCrashProcedure 770665a7984d,44657,1731493541444, splitWal=true, meta=true, isMeta: false 2024-11-13T10:26:02,133 INFO [PEWorker-3 {}] master.SplitWALManager(105): 770665a7984d,44657,1731493541444 WAL count=1, meta=false 2024-11-13T10:26:02,133 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=18, ppid=13, state=RUNNABLE:ACQUIRE_SPLIT_WAL_WORKER, hasLock=false; SplitWALProcedure 770665a7984d%2C44657%2C1731493541444.1731493543503}] 2024-11-13T10:26:02,135 DEBUG [PEWorker-2 {}] master.SplitWALManager(158): Acquired split WAL worker=770665a7984d,46143,1731493541627 2024-11-13T10:26:02,136 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE, hasLock=false; SplitWALRemoteProcedure 770665a7984d%2C44657%2C1731493541444.1731493543503, worker=770665a7984d,46143,1731493541627}] 2024-11-13T10:26:02,269 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T10:26:02,270 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=770665a7984d,46143,1731493541627, seqNum=-1] 2024-11-13T10:26:02,270 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T10:26:02,273 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57396, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T10:26:02,290 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46143 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SplitWALCallable, pid=19 2024-11-13T10:26:02,307 INFO [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(299): Splitting hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.1731493543503, size=1.4 K (1407bytes) 2024-11-13T10:26:02,307 INFO [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-1 {event_type=RS_LOG_REPLAY, pid=19}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.1731493543503 2024-11-13T10:26:02,308 INFO [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-1 {event_type=RS_LOG_REPLAY, pid=19}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.1731493543503 after 1ms 2024-11-13T10:26:02,310 DEBUG [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.1731493543503: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:26:02,310 INFO [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(310): Open hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.1731493543503 took 3ms 2024-11-13T10:26:02,313 DEBUG [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(352): Last flushed sequenceid for 70a598aa9b18017afa50633b8eb231df: last_flushed_sequence_id: 12 store_sequence_id { family_name: "cf1" sequence_id: 12 } store_sequence_id { family_name: "cf2" sequence_id: 12 } 2024-11-13T10:26:02,313 DEBUG [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.1731493543503 so closing down 2024-11-13T10:26:02,313 DEBUG [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-13T10:26:02,314 INFO [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.OutputSink(145): 3 split writer threads finished 2024-11-13T10:26:02,314 INFO [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(425): Processed 6 edits across 0 Regions in 3 ms; skipped=6; WAL=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.1731493543503, size=1.4 K, length=1407, corrupted=false, cancelled=false 2024-11-13T10:26:02,314 DEBUG [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-1 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(428): Completed split of hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.1731493543503, journal: Splitting hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.1731493543503, size=1.4 K (1407bytes) at 1731493562307Finishing writing output for 
hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.1731493543503 so closing down at 1731493562313 (+6 ms)3 split writer threads finished at 1731493562314 (+1 ms)Processed 6 edits across 0 Regions in 3 ms; skipped=6; WAL=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.1731493543503, size=1.4 K, length=1407, corrupted=false, cancelled=false at 1731493562314 2024-11-13T10:26:02,314 DEBUG [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-1 {event_type=RS_LOG_REPLAY, pid=19}] regionserver.SplitLogWorker(218): Done splitting WAL hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.1731493543503 2024-11-13T10:26:02,314 DEBUG [RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-1 {event_type=RS_LOG_REPLAY, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-13T10:26:02,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] master.HMaster(4169): Remote procedure done, pid=19 2024-11-13T10:26:02,319 INFO [PEWorker-1 {}] wal.WALSplitUtil(143): Moved hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting/770665a7984d%2C44657%2C1731493541444.1731493543503 to hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/oldWALs 2024-11-13T10:26:02,322 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-11-13T10:26:02,322 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; SplitWALRemoteProcedure 770665a7984d%2C44657%2C1731493541444.1731493543503, worker=770665a7984d,46143,1731493541627 in 183 msec 2024-11-13T10:26:02,324 DEBUG [PEWorker-5 {}] master.SplitWALManager(172): Release split WAL worker=770665a7984d,46143,1731493541627 2024-11-13T10:26:02,327 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=18, resume processing ppid=13 2024-11-13T10:26:02,327 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, ppid=13, state=SUCCESS, hasLock=false; SplitWALProcedure 770665a7984d%2C44657%2C1731493541444.1731493543503, worker=770665a7984d,46143,1731493541627 in 191 msec 2024-11-13T10:26:02,328 INFO [PEWorker-3 {}] master.SplitLogManager(171): hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/WALs/770665a7984d,44657,1731493541444-splitting dir is empty, no logs to split. 2024-11-13T10:26:02,329 INFO [PEWorker-3 {}] master.SplitWALManager(105): 770665a7984d,44657,1731493541444 WAL count=0, meta=false 2024-11-13T10:26:02,329 DEBUG [PEWorker-3 {}] procedure.ServerCrashProcedure(329): Check if 770665a7984d,44657,1731493541444 WAL splitting is done? 
wals=0, meta=false 2024-11-13T10:26:02,331 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, ASSIGN}] 2024-11-13T10:26:02,332 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=20, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, ASSIGN 2024-11-13T10:26:02,333 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=20, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, ASSIGN; state=OPEN, location=null; forceNewPlan=true, retain=false 2024-11-13T10:26:02,399 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.AbstractRpcClient(357): Not trying to connect to 770665a7984d:44657 this server is in the failed servers list 2024-11-13T10:26:02,400 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,44657,1731493541444, seqNum=12 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,44657,1731493541444, seqNum=12, error=org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=770665a7984d:44657 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 770665a7984d:44657 2024-11-13T10:26:02,400 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,44657,1731493541444, seqNum=12 is org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 770665a7984d:44657 2024-11-13T10:26:02,400 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,44657,1731493541444, seqNum=12 from cache 2024-11-13T10:26:02,484 DEBUG [770665a7984d:45401 {}] balancer.BalancerClusterState(204): Hosts are {770665a7984d=0} racks are {/default-rack=0} 2024-11-13T10:26:02,484 DEBUG [770665a7984d:45401 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T10:26:02,484 DEBUG [770665a7984d:45401 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T10:26:02,484 DEBUG [770665a7984d:45401 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T10:26:02,484 DEBUG [770665a7984d:45401 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T10:26:02,484 INFO [770665a7984d:45401 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T10:26:02,484 INFO [770665a7984d:45401 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T10:26:02,484 DEBUG [770665a7984d:45401 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 
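The SplitWALProcedure entries above show a worker reading the crashed server's WAL and skipping all six edits because region 70a598aa9b18017afa50633b8eb231df had already flushed through sequence id 12, so no recovered.edits output was produced. The standalone sketch below illustrates just that filtering decision; WalEntry is a made-up record type (not HBase's WALKey/WALEdit), and the sequence ids in main() are invented to reproduce the "Processed 6 edits ... skipped=6" outcome.

import java.util.List;
import java.util.Map;

public class WalSplitSkipSketch {

  // Minimal stand-in for one WAL entry; not HBase's real WAL classes.
  record WalEntry(String encodedRegionName, long sequenceId) {}

  // Decide, per entry, whether the splitter must write it into a recovered.edits
  // file or can skip it because the region already flushed past that sequence id.
  // lastFlushedSeqIds corresponds to the "Last flushed sequenceid for ..." report.
  static long splitOneWal(List<WalEntry> walEntries, Map<String, Long> lastFlushedSeqIds) {
    long written = 0;
    for (WalEntry entry : walEntries) {
      long flushed = lastFlushedSeqIds.getOrDefault(entry.encodedRegionName(), -1L);
      if (entry.sequenceId() <= flushed) {
        continue; // already durable in an HFile, counted as "skipped"
      }
      // A real splitter would append this edit to the region's recovered.edits
      // output; here we only count it.
      written++;
    }
    return written;
  }

  public static void main(String[] args) {
    // With all six edits at or below the flushed sequence id 12, nothing is written.
    List<WalEntry> wal = List.of(
        new WalEntry("70a598aa9b18017afa50633b8eb231df", 7),
        new WalEntry("70a598aa9b18017afa50633b8eb231df", 8),
        new WalEntry("70a598aa9b18017afa50633b8eb231df", 9),
        new WalEntry("70a598aa9b18017afa50633b8eb231df", 10),
        new WalEntry("70a598aa9b18017afa50633b8eb231df", 11),
        new WalEntry("70a598aa9b18017afa50633b8eb231df", 12));
    System.out.println(splitOneWal(wal, Map.of("70a598aa9b18017afa50633b8eb231df", 12L)));
  }
}

When an edit does need to be kept, the real splitter groups output by region and writes it under the region's recovered.edits directory, which is exactly what the meta region consumed earlier in this log.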
2024-11-13T10:26:02,485 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=70a598aa9b18017afa50633b8eb231df, regionState=OPENING, regionLocation=770665a7984d,46143,1731493541627 2024-11-13T10:26:02,486 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server 770665a7984d:44657 org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: 770665a7984d/172.17.0.2:44657 Caused by: java.net.ConnectException: finishConnect(..) failed: Connection refused at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.newConnectException0(Errors.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.handleConnectErrno(Errors.java:131) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Socket.finishConnect(Socket.java:359) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.doFinishConnect(AbstractEpollChannel.java:715) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:692) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:26:02,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1, error=java.net.ConnectException: Call to address=770665a7984d:44657 failed on connection exception: org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: 770665a7984d/172.17.0.2:44657 2024-11-13T10:26:02,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1 is java.net.ConnectException: finishConnect(..) 
failed: Connection refused 2024-11-13T10:26:02,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(88): Try removing region=hbase:meta,,1.1588230740, hostname=770665a7984d,44657,1731493541444, seqNum=-1 from cache 2024-11-13T10:26:02,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.FailedServers(52): Added failed server with address 770665a7984d:44657 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: 770665a7984d/172.17.0.2:44657 2024-11-13T10:26:02,599 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T10:26:02,599 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=770665a7984d,46143,1731493541627, seqNum=-1] 2024-11-13T10:26:02,600 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T10:26:02,601 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37569, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T10:26:02,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, ASSIGN because future has completed 2024-11-13T10:26:02,606 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE, hasLock=false; OpenRegionProcedure 70a598aa9b18017afa50633b8eb231df, server=770665a7984d,46143,1731493541627}] 2024-11-13T10:26:02,618 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,44657,1731493541444, seqNum=18] 2024-11-13T10:26:02,619 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.AbstractRpcClient(357): Not trying to connect to 770665a7984d:44657 this server is in the failed servers list 2024-11-13T10:26:02,619 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,44657,1731493541444, seqNum=18 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,44657,1731493541444, seqNum=18, error=org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=770665a7984d:44657 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 770665a7984d:44657 2024-11-13T10:26:02,619 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,44657,1731493541444, seqNum=18 is org.apache.hadoop.hbase.ipc.FailedServerException: 
This server is in the failed servers list: 770665a7984d:44657 2024-11-13T10:26:02,619 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,44657,1731493541444, seqNum=18 from cache 2024-11-13T10:26:02,772 INFO [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:02,773 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7752): Opening region: {ENCODED => 70a598aa9b18017afa50633b8eb231df, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:26:02,773 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:02,773 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:02,773 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7794): checking encryption for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:02,774 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7797): checking classloading for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:02,775 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:02,777 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 70a598aa9b18017afa50633b8eb231df columnFamilyName cf1 2024-11-13T10:26:02,777 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:02,787 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf1/6cdd26a37bcd48f98aea20841c0fab48 2024-11-13T10:26:02,787 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(327): Store=70a598aa9b18017afa50633b8eb231df/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:02,787 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:02,788 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 70a598aa9b18017afa50633b8eb231df columnFamilyName cf2 2024-11-13T10:26:02,788 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:02,795 DEBUG [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/cf2/2882fc4cc2c04ac0a353e84c3a9e789c 2024-11-13T10:26:02,795 INFO [StoreOpener-70a598aa9b18017afa50633b8eb231df-1 {}] regionserver.HStore(327): Store=70a598aa9b18017afa50633b8eb231df/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:02,795 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1038): replaying wal for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:02,796 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:02,797 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:02,798 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] 
regionserver.HRegion(1048): stopping wal replay for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:02,798 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1060): Cleaning up temporary data for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:02,798 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-11-13T10:26:02,800 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1093): writing seq id for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:02,801 INFO [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1114): Opened 70a598aa9b18017afa50633b8eb231df; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67678580, jitterRate=0.008489429950714111}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-11-13T10:26:02,802 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:02,803 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1006): Region open journal for 70a598aa9b18017afa50633b8eb231df: Running coprocessor pre-open hook at 1731493562774Writing region info on filesystem at 1731493562774Initializing all the Stores at 1731493562775 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493562775Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493562775Cleaning up temporary data from old regions at 1731493562798 (+23 ms)Running coprocessor post-open hooks at 1731493562802 (+4 ms)Region opened successfully at 1731493562803 (+1 ms) 2024-11-13T10:26:02,805 INFO [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., pid=21, masterSystemTime=1731493562767 2024-11-13T10:26:02,808 DEBUG [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 
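The surrounding entries show the client evicting its cached location for the table (old server 770665a7984d:44657, seqNum=18) after a FailedServerException, then re-fetching the location once the region reopened on 770665a7984d,46143. From application code this is transparent; a plain Get on row 'r1' simply ends up retried against the new server. A minimal client-side sketch follows, assuming an hbase-site.xml that points at the cluster; the qualifier "q" is a placeholder, since the test's real qualifiers are not visible in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadAfterRegionMoveSketch {
  public static void main(String[] args) throws Exception {
    // Assumes hbase-site.xml on the classpath points at the running cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))) {
      Get get = new Get(Bytes.toBytes("r1"));
      // "q" is a placeholder qualifier for illustration only.
      get.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("q"));
      // The client resolves the region location from hbase:meta; on a connection
      // failure it evicts the stale cache entry and retries against the server
      // that now hosts the region, as the AsyncRegionLocatorHelper lines show.
      Result result = table.get(get);
      System.out.println(
          Bytes.toString(result.getValue(Bytes.toBytes("cf1"), Bytes.toBytes("q"))));
    }
  }
}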
2024-11-13T10:26:02,808 INFO [RS_OPEN_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:02,809 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=70a598aa9b18017afa50633b8eb231df, regionState=OPEN, openSeqNum=18, regionLocation=770665a7984d,46143,1731493541627 2024-11-13T10:26:02,825 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=21, ppid=20, state=RUNNABLE, hasLock=false; OpenRegionProcedure 70a598aa9b18017afa50633b8eb231df, server=770665a7984d,46143,1731493541627 because future has completed 2024-11-13T10:26:02,871 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=21, resume processing ppid=20 2024-11-13T10:26:02,871 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, ppid=20, state=SUCCESS, hasLock=false; OpenRegionProcedure 70a598aa9b18017afa50633b8eb231df, server=770665a7984d,46143,1731493541627 in 228 msec 2024-11-13T10:26:02,901 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=13 2024-11-13T10:26:02,901 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=13, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=70a598aa9b18017afa50633b8eb231df, ASSIGN in 540 msec 2024-11-13T10:26:02,901 INFO [PEWorker-4 {}] procedure.ServerCrashProcedure(291): removed crashed server 770665a7984d,44657,1731493541444 after splitting done 2024-11-13T10:26:02,917 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; ServerCrashProcedure 770665a7984d,44657,1731493541444, splitWal=true, meta=true in 1.5470 sec 2024-11-13T10:26:02,937 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df., hostname=770665a7984d,46143,1731493541627, seqNum=18] 2024-11-13T10:26:02,988 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterRegionMovedWithMultiCF Thread=402 (was 401) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-51990266_22 at /127.0.0.1:34686 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741893_1071] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-51990266_22 at /127.0.0.1:47408 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741893_1071] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/770665a7984d:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-357896810-172.17.0.2-1731493536919:blk_1073741893_1071, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_LOG_REPLAY_OPS-regionserver/770665a7984d:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510-prefix:770665a7984d,46143,1731493541627.meta java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-51990266_22 at /127.0.0.1:46076 [Waiting for operation #37] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-51990266_22 at /127.0.0.1:57732 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741893_1071] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-515106358_22 at /127.0.0.1:56878 [Waiting for operation #31] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-357896810-172.17.0.2-1731493536919:blk_1073741893_1071, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-357896810-172.17.0.2-1731493536919:blk_1073741893_1071, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Abort regionserver monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS_OPEN_REGION-regionserver/770665a7984d:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-51990266_22 at /127.0.0.1:47950 [Waiting for operation #22] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/770665a7984d:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1028 (was 993) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=563 (was 595), ProcessCount=11 (was 11), AvailableMemoryMB=389 (was 482) 2024-11-13T10:26:02,991 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1028 is superior to 1024 2024-11-13T10:26:03,015 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterPartialFlush Thread=402, OpenFileDescriptor=1028, MaxFileDescriptor=1048576, SystemLoadAverage=563, ProcessCount=11, AvailableMemoryMB=387 2024-11-13T10:26:03,015 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1028 is superior to 1024 2024-11-13T10:26:03,036 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:26:03,038 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:26:03,039 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T10:26:03,042 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-47106074, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/hregion-47106074, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:26:03,063 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-47106074/hregion-47106074.1731493563043, exclude list is [], retry=0 2024-11-13T10:26:03,066 DEBUG [AsyncFSWAL-20-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:26:03,066 DEBUG [AsyncFSWAL-20-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:26:03,067 DEBUG [AsyncFSWAL-20-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:26:03,077 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-47106074/hregion-47106074.1731493563043 2024-11-13T10:26:03,077 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:45409:45409)] 2024-11-13T10:26:03,077 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 7799c79752c325bd9591a69209db42ad, NAME => 'testReplayEditsWrittenViaHRegion,,1731493563037.7799c79752c325bd9591a69209db42ad.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS 
=> '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41249/hbase 2024-11-13T10:26:03,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741898_1076 (size=67) 2024-11-13T10:26:03,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741898_1076 (size=67) 2024-11-13T10:26:03,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741898_1076 (size=67) 2024-11-13T10:26:03,091 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731493563037.7799c79752c325bd9591a69209db42ad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:03,092 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,094 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7799c79752c325bd9591a69209db42ad columnFamilyName a 2024-11-13T10:26:03,094 DEBUG [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:03,094 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] regionserver.HStore(327): Store=7799c79752c325bd9591a69209db42ad/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:03,094 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,096 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7799c79752c325bd9591a69209db42ad columnFamilyName b 2024-11-13T10:26:03,096 DEBUG [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:03,096 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] regionserver.HStore(327): Store=7799c79752c325bd9591a69209db42ad/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:03,096 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,097 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7799c79752c325bd9591a69209db42ad columnFamilyName c 2024-11-13T10:26:03,097 DEBUG [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:03,098 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] regionserver.HStore(327): Store=7799c79752c325bd9591a69209db42ad/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:03,098 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,099 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,099 DEBUG [Time-limited 
test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,100 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,100 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,101 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-13T10:26:03,102 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741833_1009 (size=1407) 2024-11-13T10:26:03,106 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T10:26:03,106 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 7799c79752c325bd9591a69209db42ad; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60686368, jitterRate=-0.0957026481628418}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-13T10:26:03,107 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 7799c79752c325bd9591a69209db42ad: Writing region info on filesystem at 1731493563091Initializing all the Stores at 1731493563092 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493563092Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493563092Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493563092Cleaning up temporary data from old regions at 1731493563100 (+8 ms)Region opened successfully at 1731493563107 (+7 ms) 2024-11-13T10:26:03,107 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 7799c79752c325bd9591a69209db42ad, disabling compactions & flushes 2024-11-13T10:26:03,107 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1731493563037.7799c79752c325bd9591a69209db42ad. 
2024-11-13T10:26:03,107 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1731493563037.7799c79752c325bd9591a69209db42ad. 2024-11-13T10:26:03,107 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1731493563037.7799c79752c325bd9591a69209db42ad. after waiting 0 ms 2024-11-13T10:26:03,107 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1731493563037.7799c79752c325bd9591a69209db42ad. 2024-11-13T10:26:03,108 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1731493563037.7799c79752c325bd9591a69209db42ad. 2024-11-13T10:26:03,108 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 7799c79752c325bd9591a69209db42ad: Waiting for close lock at 1731493563107Disabling compacts and flushes for region at 1731493563107Disabling writes for close at 1731493563107Writing region close event to WAL at 1731493563107Closed at 1731493563108 (+1 ms) 2024-11-13T10:26:03,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741897_1075 (size=95) 2024-11-13T10:26:03,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741897_1075 (size=95) 2024-11-13T10:26:03,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741897_1075 (size=95) 2024-11-13T10:26:03,113 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-13T10:26:03,113 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-47106074:(num 1731493563043) 2024-11-13T10:26:03,113 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-13T10:26:03,115 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:26:03,136 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035/wal.1731493563116, exclude list is [], retry=0 2024-11-13T10:26:03,140 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:26:03,140 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:26:03,140 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:26:03,142 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL 
/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035/wal.1731493563116 2024-11-13T10:26:03,142 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:45409:45409)] 2024-11-13T10:26:03,142 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 7799c79752c325bd9591a69209db42ad, NAME => 'testReplayEditsWrittenViaHRegion,,1731493563037.7799c79752c325bd9591a69209db42ad.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:26:03,143 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731493563037.7799c79752c325bd9591a69209db42ad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:03,143 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,143 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,145 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,146 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7799c79752c325bd9591a69209db42ad columnFamilyName a 2024-11-13T10:26:03,146 DEBUG [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:03,146 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] regionserver.HStore(327): Store=7799c79752c325bd9591a69209db42ad/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:03,147 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,147 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 
604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7799c79752c325bd9591a69209db42ad columnFamilyName b 2024-11-13T10:26:03,147 DEBUG [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:03,148 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] regionserver.HStore(327): Store=7799c79752c325bd9591a69209db42ad/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:03,148 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,149 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7799c79752c325bd9591a69209db42ad columnFamilyName c 2024-11-13T10:26:03,149 DEBUG [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:03,149 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] regionserver.HStore(327): Store=7799c79752c325bd9591a69209db42ad/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:03,150 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,150 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,152 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,153 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,153 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 7799c79752c325bd9591a69209db42ad 
2024-11-13T10:26:03,153 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-13T10:26:03,155 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,156 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 7799c79752c325bd9591a69209db42ad; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63483232, jitterRate=-0.054026126861572266}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-13T10:26:03,156 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 7799c79752c325bd9591a69209db42ad: Writing region info on filesystem at 1731493563143Initializing all the Stores at 1731493563144 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493563144Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493563144Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493563144Cleaning up temporary data from old regions at 1731493563153 (+9 ms)Region opened successfully at 1731493563156 (+3 ms) 2024-11-13T10:26:03,187 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 7799c79752c325bd9591a69209db42ad 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-11-13T10:26:03,204 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/.tmp/a/edb84fe69b7a466ca7518feebc5580d2 is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1731493563156/Put/seqid=0 2024-11-13T10:26:03,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741900_1078 (size=5958) 2024-11-13T10:26:03,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741900_1078 (size=5958) 2024-11-13T10:26:03,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741900_1078 (size=5958) 2024-11-13T10:26:03,212 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), 
to=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/.tmp/a/edb84fe69b7a466ca7518feebc5580d2 2024-11-13T10:26:03,231 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/.tmp/b/a9ca35d2ffa441d1a22039053396daff is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1731493563168/Put/seqid=0 2024-11-13T10:26:03,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741901_1079 (size=5958) 2024-11-13T10:26:03,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741901_1079 (size=5958) 2024-11-13T10:26:03,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741901_1079 (size=5958) 2024-11-13T10:26:03,239 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/.tmp/b/a9ca35d2ffa441d1a22039053396daff 2024-11-13T10:26:03,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741836_1012 (size=3561) 2024-11-13T10:26:03,259 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/.tmp/c/417b582c30aa482eb7a181c7635e49d1 is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1731493563178/Put/seqid=0 2024-11-13T10:26:03,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741902_1080 (size=5958) 2024-11-13T10:26:03,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741902_1080 (size=5958) 2024-11-13T10:26:03,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741902_1080 (size=5958) 2024-11-13T10:26:03,266 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/.tmp/c/417b582c30aa482eb7a181c7635e49d1 2024-11-13T10:26:03,272 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/.tmp/a/edb84fe69b7a466ca7518feebc5580d2 as hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/a/edb84fe69b7a466ca7518feebc5580d2 2024-11-13T10:26:03,276 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/a/edb84fe69b7a466ca7518feebc5580d2, entries=10, sequenceid=33, filesize=5.8 K 2024-11-13T10:26:03,277 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/.tmp/b/a9ca35d2ffa441d1a22039053396daff as hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/b/a9ca35d2ffa441d1a22039053396daff 2024-11-13T10:26:03,283 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/b/a9ca35d2ffa441d1a22039053396daff, entries=10, sequenceid=33, filesize=5.8 K 2024-11-13T10:26:03,284 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/.tmp/c/417b582c30aa482eb7a181c7635e49d1 as hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/c/417b582c30aa482eb7a181c7635e49d1 2024-11-13T10:26:03,289 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/c/417b582c30aa482eb7a181c7635e49d1, entries=10, sequenceid=33, filesize=5.8 K 2024-11-13T10:26:03,290 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 7799c79752c325bd9591a69209db42ad in 104ms, sequenceid=33, compaction requested=false 2024-11-13T10:26:03,290 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 7799c79752c325bd9591a69209db42ad: 2024-11-13T10:26:03,290 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 7799c79752c325bd9591a69209db42ad, disabling compactions & flushes 2024-11-13T10:26:03,290 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1731493563037.7799c79752c325bd9591a69209db42ad. 2024-11-13T10:26:03,291 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1731493563037.7799c79752c325bd9591a69209db42ad. 2024-11-13T10:26:03,291 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1731493563037.7799c79752c325bd9591a69209db42ad. after waiting 0 ms 2024-11-13T10:26:03,291 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1731493563037.7799c79752c325bd9591a69209db42ad. 2024-11-13T10:26:03,292 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1731493563037.7799c79752c325bd9591a69209db42ad. 
2024-11-13T10:26:03,292 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 7799c79752c325bd9591a69209db42ad: Waiting for close lock at 1731493563290Disabling compacts and flushes for region at 1731493563290Disabling writes for close at 1731493563291 (+1 ms)Writing region close event to WAL at 1731493563292 (+1 ms)Closed at 1731493563292 2024-11-13T10:26:03,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741899_1077 (size=3385) 2024-11-13T10:26:03,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741899_1077 (size=3385) 2024-11-13T10:26:03,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741899_1077 (size=3385) 2024-11-13T10:26:03,300 DEBUG [Time-limited test {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/b/a9ca35d2ffa441d1a22039053396daff to hdfs://localhost:41249/hbase/archive/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/b/a9ca35d2ffa441d1a22039053396daff 2024-11-13T10:26:03,316 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:41249/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035/wal.1731493563116, size=3.3 K (3385bytes) 2024-11-13T10:26:03,317 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41249/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035/wal.1731493563116 2024-11-13T10:26:03,317 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41249/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035/wal.1731493563116 after 0ms 2024-11-13T10:26:03,319 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035/wal.1731493563116: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:26:03,320 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:41249/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035/wal.1731493563116 took 4ms 2024-11-13T10:26:03,322 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:41249/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035/wal.1731493563116 so closing down 2024-11-13T10:26:03,322 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-13T10:26:03,323 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1731493563116.temp 2024-11-13T10:26:03,324 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/recovered.edits/0000000000000000003-wal.1731493563116.temp 2024-11-13T10:26:03,325 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-13T10:26:03,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45097 is added to blk_1073741903_1081 (size=2944) 2024-11-13T10:26:03,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741903_1081 (size=2944) 2024-11-13T10:26:03,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741903_1081 (size=2944) 2024-11-13T10:26:03,334 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/recovered.edits/0000000000000000003-wal.1731493563116.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-11-13T10:26:03,335 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/recovered.edits/0000000000000000003-wal.1731493563116.temp to hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/recovered.edits/0000000000000000032 2024-11-13T10:26:03,335 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 15 ms; skipped=2; WAL=hdfs://localhost:41249/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035/wal.1731493563116, size=3.3 K, length=3385, corrupted=false, cancelled=false 2024-11-13T10:26:03,335 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:41249/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035/wal.1731493563116, journal: Splitting hdfs://localhost:41249/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035/wal.1731493563116, size=3.3 K (3385bytes) at 1731493563317Finishing writing output for hdfs://localhost:41249/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035/wal.1731493563116 so closing down at 1731493563322 (+5 ms)Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/recovered.edits/0000000000000000003-wal.1731493563116.temp at 1731493563324 (+2 ms)3 split writer threads finished at 1731493563325 (+1 ms)Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/recovered.edits/0000000000000000003-wal.1731493563116.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1731493563334 (+9 ms)Rename recovered edits hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/recovered.edits/0000000000000000003-wal.1731493563116.temp to hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/recovered.edits/0000000000000000032 at 1731493563335 (+1 ms)Processed 32 edits across 1 Regions in 15 ms; skipped=2; WAL=hdfs://localhost:41249/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035/wal.1731493563116, size=3.3 K, length=3385, corrupted=false, cancelled=false at 1731493563335 2024-11-13T10:26:03,337 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:41249/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035/wal.1731493563116 to hdfs://localhost:41249/hbase/oldWALs/wal.1731493563116 2024-11-13T10:26:03,338 INFO [Time-limited test {}] 
wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/recovered.edits/0000000000000000032 2024-11-13T10:26:03,338 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-13T10:26:03,340 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:26:03,354 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035/wal.1731493563340, exclude list is [], retry=0 2024-11-13T10:26:03,357 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:26:03,357 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:26:03,357 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:26:03,359 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035/wal.1731493563340 2024-11-13T10:26:03,359 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:35745:35745)] 2024-11-13T10:26:03,359 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 7799c79752c325bd9591a69209db42ad, NAME => 'testReplayEditsWrittenViaHRegion,,1731493563037.7799c79752c325bd9591a69209db42ad.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:26:03,359 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731493563037.7799c79752c325bd9591a69209db42ad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:03,360 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,360 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,361 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,362 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7799c79752c325bd9591a69209db42ad columnFamilyName a 2024-11-13T10:26:03,362 DEBUG [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:03,367 DEBUG [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/a/edb84fe69b7a466ca7518feebc5580d2 2024-11-13T10:26:03,368 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] regionserver.HStore(327): Store=7799c79752c325bd9591a69209db42ad/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:03,368 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,369 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7799c79752c325bd9591a69209db42ad columnFamilyName b 2024-11-13T10:26:03,369 DEBUG [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:03,370 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] regionserver.HStore(327): Store=7799c79752c325bd9591a69209db42ad/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:03,370 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,371 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7799c79752c325bd9591a69209db42ad columnFamilyName c 2024-11-13T10:26:03,371 DEBUG [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:03,376 DEBUG [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/c/417b582c30aa482eb7a181c7635e49d1 2024-11-13T10:26:03,377 INFO [StoreOpener-7799c79752c325bd9591a69209db42ad-1 {}] regionserver.HStore(327): Store=7799c79752c325bd9591a69209db42ad/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:03,377 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,378 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,379 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,380 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/recovered.edits/0000000000000000032 2024-11-13T10:26:03,382 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/recovered.edits/0000000000000000032: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:26:03,383 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 20, firstSequenceIdInLog=3, maxSequenceIdInLog=32, path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/recovered.edits/0000000000000000032 2024-11-13T10:26:03,383 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 7799c79752c325bd9591a69209db42ad 3/3 column families, dataSize=870 B heapSize=2.31 KB 2024-11-13T10:26:03,398 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/.tmp/b/dc406946a1c54a5b8c34a4c668890479 is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1731493563168/Put/seqid=0 2024-11-13T10:26:03,405 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741905_1083 (size=5958)
2024-11-13T10:26:03,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741905_1083 (size=5958)
2024-11-13T10:26:03,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741905_1083 (size=5958)
2024-11-13T10:26:03,406 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=32 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/.tmp/b/dc406946a1c54a5b8c34a4c668890479
2024-11-13T10:26:03,412 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/.tmp/b/dc406946a1c54a5b8c34a4c668890479 as hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/b/dc406946a1c54a5b8c34a4c668890479
2024-11-13T10:26:03,417 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/b/dc406946a1c54a5b8c34a4c668890479, entries=10, sequenceid=32, filesize=5.8 K
2024-11-13T10:26:03,418 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 7799c79752c325bd9591a69209db42ad in 35ms, sequenceid=32, compaction requested=false; wal=null
2024-11-13T10:26:03,418 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/recovered.edits/0000000000000000032
2024-11-13T10:26:03,419 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 7799c79752c325bd9591a69209db42ad
2024-11-13T10:26:03,419 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 7799c79752c325bd9591a69209db42ad
2024-11-13T10:26:03,420 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead.
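The replay just above applied only the 10 edits destined for column family b and skipped the other 20 (store files for families a and c were already loaded, and edits at or below a store's flushed sequence id are not replayed), then flushed b at sequenceid=32 and deleted recovered.edits/0000000000000000032, whose file name records the highest sequence id produced by the WAL split. As a rough standalone illustration of that naming convention (not HBase's internal replay code; the class name is made up, the HDFS URI and region path are copied from the log above, and only the plain Hadoop FileSystem API is used), the following sketch lists a region's recovered.edits directory and reports the largest numeric file name it finds:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RecoveredEditsCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address taken from the log above; adjust for a real cluster.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41249"), conf);

        // Region directory copied from the log lines above.
        Path regionDir = new Path("/hbase/data/default/testReplayEditsWrittenViaHRegion/"
            + "7799c79752c325bd9591a69209db42ad");
        Path editsDir = new Path(regionDir, "recovered.edits");

        long maxSeqId = -1L;
        if (fs.exists(editsDir)) {
          for (FileStatus st : fs.listStatus(editsDir)) {
            String name = st.getPath().getName(); // e.g. 0000000000000000032
            // Skip split-writer leftovers (*-wal.*.temp) and N.seqid marker files;
            // only purely numeric names are finished recovered-edits files.
            if (name.matches("\\d+")) {
              maxSeqId = Math.max(maxSeqId, Long.parseLong(name));
            }
          }
        }
        System.out.println("Highest sequence id covered by recovered edits: " + maxSeqId);
      }
    }

A result of -1 means no recovered-edits files remain, which matches the state of this region after the deletion logged at 10:26:03,418.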
2024-11-13T10:26:03,421 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 7799c79752c325bd9591a69209db42ad 2024-11-13T10:26:03,424 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/7799c79752c325bd9591a69209db42ad/recovered.edits/33.seqid, newMaxSeqId=33, maxSeqId=1 2024-11-13T10:26:03,425 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 7799c79752c325bd9591a69209db42ad; next sequenceid=34; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61977678, jitterRate=-0.07646062970161438}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-13T10:26:03,425 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 7799c79752c325bd9591a69209db42ad: Writing region info on filesystem at 1731493563360Initializing all the Stores at 1731493563360Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493563360Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493563361 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493563361Obtaining lock to block concurrent updates at 1731493563383 (+22 ms)Preparing flush snapshotting stores in 7799c79752c325bd9591a69209db42ad at 1731493563383Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1731493563037.7799c79752c325bd9591a69209db42ad., syncing WAL and waiting on mvcc, flushsize=dataSize=870, getHeapSize=2320, getOffHeapSize=0, getCellsCount=10 at 1731493563384 (+1 ms)Flushing stores of testReplayEditsWrittenViaHRegion,,1731493563037.7799c79752c325bd9591a69209db42ad. 
at 1731493563384Flushing 7799c79752c325bd9591a69209db42ad/b: creating writer at 1731493563384Flushing 7799c79752c325bd9591a69209db42ad/b: appending metadata at 1731493563398 (+14 ms)Flushing 7799c79752c325bd9591a69209db42ad/b: closing flushed file at 1731493563398Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11ff428f: reopening flushed file at 1731493563411 (+13 ms)Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 7799c79752c325bd9591a69209db42ad in 35ms, sequenceid=32, compaction requested=false; wal=null at 1731493563418 (+7 ms)Cleaning up temporary data from old regions at 1731493563419 (+1 ms)Region opened successfully at 1731493563425 (+6 ms) 2024-11-13T10:26:03,448 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterPartialFlush Thread=411 (was 402) Potentially hanging thread: AsyncFSWAL-20-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:46076 [Waiting for operation #41] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:56878 [Waiting for operation #46] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:47504 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741904_1082] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-357896810-172.17.0.2-1731493536919:blk_1073741904_1082, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-357896810-172.17.0.2-1731493536919:blk_1073741904_1082, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:34784 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741904_1082] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-357896810-172.17.0.2-1731493536919:blk_1073741904_1082, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:57824 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741904_1082] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:47950 [Waiting for operation #36] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1104 (was 1028) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=563 (was 563), ProcessCount=11 (was 11), AvailableMemoryMB=376 (was 387) 2024-11-13T10:26:03,449 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1104 is superior to 1024 2024-11-13T10:26:03,464 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterAbortingFlush Thread=411, OpenFileDescriptor=1104, MaxFileDescriptor=1048576, SystemLoadAverage=563, ProcessCount=11, AvailableMemoryMB=375 2024-11-13T10:26:03,464 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1104 is superior to 1024 2024-11-13T10:26:03,488 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:26:03,490 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:26:03,491 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T10:26:03,493 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-33945574, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/hregion-33945574, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:26:03,507 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-33945574/hregion-33945574.1731493563494, exclude list is [], retry=0 2024-11-13T10:26:03,509 DEBUG [AsyncFSWAL-22-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:26:03,510 DEBUG [AsyncFSWAL-22-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:26:03,510 DEBUG [AsyncFSWAL-22-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:26:03,512 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-33945574/hregion-33945574.1731493563494 2024-11-13T10:26:03,512 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:35745:35745)] 2024-11-13T10:26:03,512 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => d48059178f9b93b9bd0bb136e0cb43f3, NAME => 'testReplayEditsAfterAbortingFlush,,1731493563488.d48059178f9b93b9bd0bb136e0cb43f3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterAbortingFlush', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41249/hbase 2024-11-13T10:26:03,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741907_1085 (size=68) 2024-11-13T10:26:03,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741907_1085 (size=68) 2024-11-13T10:26:03,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741907_1085 (size=68) 2024-11-13T10:26:03,525 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1731493563488.d48059178f9b93b9bd0bb136e0cb43f3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:03,528 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,530 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d48059178f9b93b9bd0bb136e0cb43f3 columnFamilyName a 2024-11-13T10:26:03,530 DEBUG [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:03,531 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] regionserver.HStore(327): Store=d48059178f9b93b9bd0bb136e0cb43f3/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:03,531 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,532 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d48059178f9b93b9bd0bb136e0cb43f3 columnFamilyName b 2024-11-13T10:26:03,532 DEBUG [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:03,532 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] regionserver.HStore(327): Store=d48059178f9b93b9bd0bb136e0cb43f3/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:03,533 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,534 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d48059178f9b93b9bd0bb136e0cb43f3 columnFamilyName c 2024-11-13T10:26:03,534 DEBUG [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:03,534 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] regionserver.HStore(327): Store=d48059178f9b93b9bd0bb136e0cb43f3/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:03,534 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,535 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,535 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,536 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,536 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,537 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-13T10:26:03,538 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,539 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T10:26:03,540 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d48059178f9b93b9bd0bb136e0cb43f3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75098799, jitterRate=0.11905930936336517}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-13T10:26:03,540 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d48059178f9b93b9bd0bb136e0cb43f3: Writing region info on filesystem at 1731493563525Initializing all the Stores at 1731493563526 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493563526Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493563528 (+2 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493563528Cleaning up temporary data from old regions at 1731493563536 (+8 ms)Region opened successfully at 1731493563540 (+4 ms) 2024-11-13T10:26:03,541 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing d48059178f9b93b9bd0bb136e0cb43f3, disabling compactions & flushes 2024-11-13T10:26:03,541 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1731493563488.d48059178f9b93b9bd0bb136e0cb43f3. 2024-11-13T10:26:03,541 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1731493563488.d48059178f9b93b9bd0bb136e0cb43f3. 2024-11-13T10:26:03,541 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1731493563488.d48059178f9b93b9bd0bb136e0cb43f3. 
after waiting 0 ms 2024-11-13T10:26:03,541 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1731493563488.d48059178f9b93b9bd0bb136e0cb43f3. 2024-11-13T10:26:03,541 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1731493563488.d48059178f9b93b9bd0bb136e0cb43f3. 2024-11-13T10:26:03,541 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for d48059178f9b93b9bd0bb136e0cb43f3: Waiting for close lock at 1731493563541Disabling compacts and flushes for region at 1731493563541Disabling writes for close at 1731493563541Writing region close event to WAL at 1731493563541Closed at 1731493563541 2024-11-13T10:26:03,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741906_1084 (size=95) 2024-11-13T10:26:03,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741906_1084 (size=95) 2024-11-13T10:26:03,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741906_1084 (size=95) 2024-11-13T10:26:03,548 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-13T10:26:03,548 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-33945574:(num 1731493563494) 2024-11-13T10:26:03,548 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-13T10:26:03,550 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:26:03,563 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487/wal.1731493563550, exclude list is [], retry=0 2024-11-13T10:26:03,565 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:26:03,566 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:26:03,566 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:26:03,568 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487/wal.1731493563550 2024-11-13T10:26:03,568 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:42913:42913)] 2024-11-13T10:26:03,740 DEBUG [Time-limited test {}] 
regionserver.HRegion(7752): Opening region: {ENCODED => d48059178f9b93b9bd0bb136e0cb43f3, NAME => 'testReplayEditsAfterAbortingFlush,,1731493563488.d48059178f9b93b9bd0bb136e0cb43f3.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:26:03,745 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,746 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1731493563488.d48059178f9b93b9bd0bb136e0cb43f3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:03,747 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,747 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,757 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,760 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d48059178f9b93b9bd0bb136e0cb43f3 columnFamilyName a 2024-11-13T10:26:03,760 DEBUG [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:03,764 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] regionserver.HStore(327): Store=d48059178f9b93b9bd0bb136e0cb43f3/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:03,765 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,768 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d48059178f9b93b9bd0bb136e0cb43f3 columnFamilyName b 2024-11-13T10:26:03,768 DEBUG [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:03,776 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] regionserver.HStore(327): Store=d48059178f9b93b9bd0bb136e0cb43f3/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:03,777 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,784 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d48059178f9b93b9bd0bb136e0cb43f3 columnFamilyName c 2024-11-13T10:26:03,784 DEBUG [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:03,789 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] regionserver.HStore(327): Store=d48059178f9b93b9bd0bb136e0cb43f3/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:03,789 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,791 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,793 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,794 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,794 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,795 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-13T10:26:03,796 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,797 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d48059178f9b93b9bd0bb136e0cb43f3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68962794, jitterRate=0.027625709772109985}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-13T10:26:03,797 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:03,798 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d48059178f9b93b9bd0bb136e0cb43f3: Running coprocessor pre-open hook at 1731493563747Writing region info on filesystem at 1731493563747Initializing all the Stores at 1731493563753 (+6 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493563753Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493563757 (+4 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493563757Cleaning up temporary data from old regions at 1731493563794 (+37 ms)Running coprocessor post-open hooks at 1731493563797 (+3 ms)Region opened successfully at 1731493563797 2024-11-13T10:26:03,809 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing d48059178f9b93b9bd0bb136e0cb43f3 3/3 column families, dataSize=590 B heapSize=2.08 KB 2024-11-13T10:26:03,810 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for d48059178f9b93b9bd0bb136e0cb43f3/a, retrying num=0 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:26:04,754 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-13T10:26:04,810 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for d48059178f9b93b9bd0bb136e0cb43f3/a, retrying num=1 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) 
~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:26:05,811 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for d48059178f9b93b9bd0bb136e0cb43f3/a, retrying num=2 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:26:06,812 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for d48059178f9b93b9bd0bb136e0cb43f3/a, retrying num=3 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:26:07,812 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for d48059178f9b93b9bd0bb136e0cb43f3/a, retrying num=4 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:26:08,813 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for d48059178f9b93b9bd0bb136e0cb43f3/a, retrying num=5 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:26:09,504 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
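
The FsDatasetAsyncDiskServiceFixer DEBUG line just above comes from a reflective lookup of a private Hadoop field ("threadGroup") that no longer exists on newer Hadoop releases, so the helper logs a hint pointing at HBASE-27595 and carries on instead of failing the test. Below is a minimal sketch of that degrade-gracefully reflection pattern using only the JDK; the class and method names are illustrative stand-ins, not the actual HBase or Hadoop internals.

    import java.lang.reflect.Field;

    public class ReflectiveFieldProbe {
        // Try to read a private field by name; log a hint and return null if the
        // field is absent (e.g. because the dependency's internals changed).
        static Object readPrivateField(Object target, String fieldName) {
            try {
                Field f = target.getClass().getDeclaredField(fieldName);
                f.setAccessible(true);
                return f.get(target);
            } catch (NoSuchFieldException e) {
                System.err.println("NoSuchFieldException: " + fieldName
                        + "; the library version may have changed, continuing without the fix");
                return null;
            } catch (IllegalAccessException e) {
                throw new IllegalStateException(e);
            }
        }

        public static void main(String[] args) {
            // Probing a field that does not exist simply logs the hint and yields null.
            System.out.println(readPrivateField("some object", "threadGroup"));
        }
    }
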
2024-11-13T10:26:09,814 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for d48059178f9b93b9bd0bb136e0cb43f3/a, retrying num=6 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:26:10,815 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for d48059178f9b93b9bd0bb136e0cb43f3/a, retrying num=7 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
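
The repeated WARN lines above show HStore retrying the store-file flush roughly once per second (retrying num=0 through num=9) because the test's CustomStoreFlusher always throws a simulated IOException. The following is a compact sketch of that bounded retry-then-rethrow shape in plain Java; the StoreFlusher interface, the retry count of 10 and the 1-second pause mirror what the log shows but are assumptions here, not the actual HStore code.

    import java.io.IOException;

    public class FlushRetryDemo {
        // Stand-in for the store flusher; the test's CustomStoreFlusher fails on purpose.
        interface StoreFlusher {
            void flushSnapshot() throws IOException;
        }

        // Retry the flush up to maxRetries times, pausing between attempts,
        // and rethrow the last failure once the budget is exhausted.
        static void flushWithRetries(StoreFlusher flusher, int maxRetries, long pauseMillis)
                throws IOException, InterruptedException {
            IOException last = null;
            for (int attempt = 0; attempt < maxRetries; attempt++) {
                try {
                    flusher.flushSnapshot();
                    return; // success
                } catch (IOException e) {
                    last = e;
                    System.err.println("Failed flushing store file, retrying num=" + attempt);
                    Thread.sleep(pauseMillis);
                }
            }
            throw last; // all attempts failed, surface the error to the caller
        }

        public static void main(String[] args) throws Exception {
            StoreFlusher alwaysFails = () -> {
                throw new IOException("Simulated exception by tests");
            };
            try {
                flushWithRetries(alwaysFails, 10, 1000L);
            } catch (IOException expected) {
                System.out.println("Flush gave up after 10 attempts: " + expected.getMessage());
            }
        }
    }
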
2024-11-13T10:26:11,062 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion 2024-11-13T10:26:11,063 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion Metrics about Tables on a single HBase RegionServer 2024-11-13T10:26:11,063 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush 2024-11-13T10:26:11,063 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush Metrics about Tables on a single HBase RegionServer 2024-11-13T10:26:11,816 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for d48059178f9b93b9bd0bb136e0cb43f3/a, retrying num=8 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:26:12,817 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for d48059178f9b93b9bd0bb136e0cb43f3/a, retrying num=9 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:26:12,818 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for d48059178f9b93b9bd0bb136e0cb43f3: 2024-11-13T10:26:12,818 INFO [Time-limited test {}] wal.AbstractTestWALReplay(671): Expected simulated exception when flushing region, region: testReplayEditsAfterAbortingFlush,,1731493563488.d48059178f9b93b9bd0bb136e0cb43f3. 2024-11-13T10:26:12,831 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for d48059178f9b93b9bd0bb136e0cb43f3: 2024-11-13T10:26:12,831 INFO [Time-limited test {}] wal.AbstractTestWALReplay(691): Expected exception when flushing region because server is stopped,Aborting flush because server is aborted... 
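
The two "Expected ... exception" INFO lines above are the test confirming that both flush attempts failed the way it wanted: the first because of the injected flusher exception, the second because the test's fake server had been marked aborted. A minimal JUnit 4 sketch of that assert-the-failure pattern follows; the ToyRegion stand-in is invented for illustration and is not the HBase test's own class.

    import static org.junit.Assert.assertTrue;
    import static org.junit.Assert.fail;

    import java.io.IOException;
    import org.junit.Test;

    public class ExpectedFlushFailureTest {
        // Toy region whose flush can be made to fail, loosely modelled on the log above.
        static class ToyRegion {
            boolean serverAborted = false;

            void flush(boolean simulateFailure) throws IOException {
                if (serverAborted) {
                    throw new IOException("Aborting flush because server is aborted...");
                }
                if (simulateFailure) {
                    throw new IOException("Simulated exception by tests");
                }
            }
        }

        @Test
        public void flushFailsAsExpected() {
            ToyRegion region = new ToyRegion();

            // First flush: the injected flusher failure must surface to the caller.
            try {
                region.flush(true);
                fail("flush should have thrown the simulated exception");
            } catch (IOException expected) {
                assertTrue(expected.getMessage().contains("Simulated exception"));
            }

            // Second flush: once the server is flagged as aborted, flushing is refused.
            region.serverAborted = true;
            try {
                region.flush(false);
                fail("flush should have been rejected on an aborted server");
            } catch (IOException expected) {
                assertTrue(expected.getMessage().contains("server is aborted"));
            }
        }
    }
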
2024-11-13T10:26:12,831 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing d48059178f9b93b9bd0bb136e0cb43f3, disabling compactions & flushes 2024-11-13T10:26:12,831 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1731493563488.d48059178f9b93b9bd0bb136e0cb43f3. 2024-11-13T10:26:12,831 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1731493563488.d48059178f9b93b9bd0bb136e0cb43f3. 2024-11-13T10:26:12,831 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1731493563488.d48059178f9b93b9bd0bb136e0cb43f3. after waiting 0 ms 2024-11-13T10:26:12,831 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1731493563488.d48059178f9b93b9bd0bb136e0cb43f3. 2024-11-13T10:26:12,832 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1190 in region testReplayEditsAfterAbortingFlush,,1731493563488.d48059178f9b93b9bd0bb136e0cb43f3. 2024-11-13T10:26:12,832 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1731493563488.d48059178f9b93b9bd0bb136e0cb43f3. 2024-11-13T10:26:12,832 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for d48059178f9b93b9bd0bb136e0cb43f3: Waiting for close lock at 1731493572831Running coprocessor pre-close hooks at 1731493572831Disabling compacts and flushes for region at 1731493572831Disabling writes for close at 1731493572831Writing region close event to WAL at 1731493572832 (+1 ms)Running coprocessor post-close hooks at 1731493572832Closed at 1731493572832 2024-11-13T10:26:12,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741908_1086 (size=2691) 2024-11-13T10:26:12,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741908_1086 (size=2691) 2024-11-13T10:26:12,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741908_1086 (size=2691) 2024-11-13T10:26:12,862 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:41249/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487/wal.1731493563550, size=2.6 K (2691bytes) 2024-11-13T10:26:12,862 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41249/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487/wal.1731493563550 2024-11-13T10:26:12,862 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41249/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487/wal.1731493563550 after 0ms 2024-11-13T10:26:12,865 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487/wal.1731493563550: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:26:12,865 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:41249/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487/wal.1731493563550 took 3ms 2024-11-13T10:26:12,868 DEBUG 
[Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:41249/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487/wal.1731493563550 so closing down 2024-11-13T10:26:12,868 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-13T10:26:12,869 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000004-wal.1731493563550.temp 2024-11-13T10:26:12,871 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/recovered.edits/0000000000000000004-wal.1731493563550.temp 2024-11-13T10:26:12,871 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-13T10:26:12,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741909_1087 (size=2094) 2024-11-13T10:26:12,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741909_1087 (size=2094) 2024-11-13T10:26:12,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741909_1087 (size=2094) 2024-11-13T10:26:12,880 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/recovered.edits/0000000000000000004-wal.1731493563550.temp (wrote 20 edits, skipped 0 edits in 0 ms) 2024-11-13T10:26:12,882 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/recovered.edits/0000000000000000004-wal.1731493563550.temp to hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/recovered.edits/0000000000000000026 2024-11-13T10:26:12,882 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 23 edits across 1 Regions in 16 ms; skipped=3; WAL=hdfs://localhost:41249/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487/wal.1731493563550, size=2.6 K, length=2691, corrupted=false, cancelled=false 2024-11-13T10:26:12,882 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:41249/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487/wal.1731493563550, journal: Splitting hdfs://localhost:41249/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487/wal.1731493563550, size=2.6 K (2691bytes) at 1731493572862Finishing writing output for hdfs://localhost:41249/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487/wal.1731493563550 so closing down at 1731493572868 (+6 ms)Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/recovered.edits/0000000000000000004-wal.1731493563550.temp at 1731493572871 (+3 ms)3 split writer threads finished at 1731493572871Closed recovered edits writer 
path=hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/recovered.edits/0000000000000000004-wal.1731493563550.temp (wrote 20 edits, skipped 0 edits in 0 ms) at 1731493572880 (+9 ms)Rename recovered edits hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/recovered.edits/0000000000000000004-wal.1731493563550.temp to hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/recovered.edits/0000000000000000026 at 1731493572882 (+2 ms)Processed 23 edits across 1 Regions in 16 ms; skipped=3; WAL=hdfs://localhost:41249/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487/wal.1731493563550, size=2.6 K, length=2691, corrupted=false, cancelled=false at 1731493572882 2024-11-13T10:26:12,884 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:41249/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487/wal.1731493563550 to hdfs://localhost:41249/hbase/oldWALs/wal.1731493563550 2024-11-13T10:26:12,885 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/recovered.edits/0000000000000000026 2024-11-13T10:26:12,885 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-13T10:26:12,887 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:26:12,909 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487/wal.1731493572887, exclude list is [], retry=0 2024-11-13T10:26:12,912 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:26:12,913 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:26:12,913 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:26:12,916 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487/wal.1731493572887 2024-11-13T10:26:12,919 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:35745:35745)] 2024-11-13T10:26:12,919 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => d48059178f9b93b9bd0bb136e0cb43f3, NAME => 'testReplayEditsAfterAbortingFlush,,1731493563488.d48059178f9b93b9bd0bb136e0cb43f3.', 
STARTKEY => '', ENDKEY => ''} 2024-11-13T10:26:12,920 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:12,920 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1731493563488.d48059178f9b93b9bd0bb136e0cb43f3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:12,920 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:12,920 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:12,922 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:12,923 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d48059178f9b93b9bd0bb136e0cb43f3 columnFamilyName a 2024-11-13T10:26:12,923 DEBUG [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:12,924 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] regionserver.HStore(327): Store=d48059178f9b93b9bd0bb136e0cb43f3/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:12,924 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:12,925 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d48059178f9b93b9bd0bb136e0cb43f3 columnFamilyName b 2024-11-13T10:26:12,925 DEBUG [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:12,926 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] regionserver.HStore(327): Store=d48059178f9b93b9bd0bb136e0cb43f3/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:12,926 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:12,927 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d48059178f9b93b9bd0bb136e0cb43f3 columnFamilyName c 2024-11-13T10:26:12,927 DEBUG [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:12,927 INFO [StoreOpener-d48059178f9b93b9bd0bb136e0cb43f3-1 {}] regionserver.HStore(327): Store=d48059178f9b93b9bd0bb136e0cb43f3/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:12,927 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:12,928 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:12,930 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:12,930 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/recovered.edits/0000000000000000026 2024-11-13T10:26:12,933 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/recovered.edits/0000000000000000026: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, 
valueCompressionType=GZ 2024-11-13T10:26:12,934 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 0, firstSequenceIdInLog=4, maxSequenceIdInLog=26, path=hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/recovered.edits/0000000000000000026 2024-11-13T10:26:12,934 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing d48059178f9b93b9bd0bb136e0cb43f3 3/3 column families, dataSize=1.16 KB heapSize=3.41 KB 2024-11-13T10:26:12,957 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/.tmp/a/573387e8cd2e442e8aa2c1e14172aa93 is 64, key is testReplayEditsAfterAbortingFlush12/a:q/1731493572822/Put/seqid=0 2024-11-13T10:26:12,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741911_1089 (size=5523) 2024-11-13T10:26:12,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741911_1089 (size=5523) 2024-11-13T10:26:12,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741911_1089 (size=5523) 2024-11-13T10:26:12,966 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=416 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/.tmp/a/573387e8cd2e442e8aa2c1e14172aa93 2024-11-13T10:26:12,992 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/.tmp/b/969434c9771248acb27385ca368c928f is 64, key is testReplayEditsAfterAbortingFlush10/b:q/1731493572819/Put/seqid=0 2024-11-13T10:26:12,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741912_1090 (size=5524) 2024-11-13T10:26:12,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741912_1090 (size=5524) 2024-11-13T10:26:13,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741912_1090 (size=5524) 2024-11-13T10:26:13,000 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=417 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/.tmp/b/969434c9771248acb27385ca368c928f 2024-11-13T10:26:13,029 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/.tmp/c/9cd13307a0824dc3a403c40bf447eb97 is 64, key is testReplayEditsAfterAbortingFlush11/c:q/1731493572820/Put/seqid=0 2024-11-13T10:26:13,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741913_1091 (size=5457) 2024-11-13T10:26:13,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741913_1091 (size=5457) 
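The entries above show the recovered edits file 0000000000000000026 being replayed into the re-opened region (Applied 20, skipped 0, firstSequenceIdInLog=4, maxSequenceIdInLog=26) and the rebuilt memstore then being flushed out to .tmp store files. The rule behind that accounting is that an edit is re-applied only if its sequence id is greater than what the stores have already persisted; anything at or below that is counted as skipped. A minimal sketch of that filter in plain Java, with a hypothetical RecoveredEdit type and memstore interface (illustrative only, not the HRegion implementation):

    import java.util.List;

    // Hypothetical stand-in for one WAL entry read back from a recovered.edits file.
    record RecoveredEdit(long sequenceId, byte[] cell) {}

    final class ReplaySketch {
        interface MemStoreSketch { void add(RecoveredEdit e); }

        // Mirrors the "Applied N, skipped M, maxSequenceIdInLog=..." accounting above:
        // only edits newer than what the stores already persisted are re-applied.
        static long replay(List<RecoveredEdit> edits, long maxPersistedSeqId,
                           MemStoreSketch memstore) {
            long applied = 0, skipped = 0, maxSeqIdInLog = -1;
            for (RecoveredEdit e : edits) {
                maxSeqIdInLog = Math.max(maxSeqIdInLog, e.sequenceId());
                if (e.sequenceId() <= maxPersistedSeqId) {
                    skipped++;              // already durable in a store file, nothing to do
                } else {
                    memstore.add(e);        // re-insert into the in-memory store
                    applied++;
                }
            }
            System.out.printf("Applied %d, skipped %d, maxSequenceIdInLog=%d%n",
                    applied, skipped, maxSeqIdInLog);
            return maxSeqIdInLog;           // the sequence id the rebuilt memstore is flushed at
        }
    }

The returned maximum is what shows up again below as the flush sequence id (sequenceid=26) and in the 26.seqid marker written just before the region is declared open.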
2024-11-13T10:26:13,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741913_1091 (size=5457) 2024-11-13T10:26:13,038 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=357 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/.tmp/c/9cd13307a0824dc3a403c40bf447eb97 2024-11-13T10:26:13,045 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/.tmp/a/573387e8cd2e442e8aa2c1e14172aa93 as hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/a/573387e8cd2e442e8aa2c1e14172aa93 2024-11-13T10:26:13,051 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/a/573387e8cd2e442e8aa2c1e14172aa93, entries=7, sequenceid=26, filesize=5.4 K 2024-11-13T10:26:13,052 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/.tmp/b/969434c9771248acb27385ca368c928f as hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/b/969434c9771248acb27385ca368c928f 2024-11-13T10:26:13,059 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/b/969434c9771248acb27385ca368c928f, entries=7, sequenceid=26, filesize=5.4 K 2024-11-13T10:26:13,060 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/.tmp/c/9cd13307a0824dc3a403c40bf447eb97 as hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/c/9cd13307a0824dc3a403c40bf447eb97 2024-11-13T10:26:13,066 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/c/9cd13307a0824dc3a403c40bf447eb97, entries=6, sequenceid=26, filesize=5.3 K 2024-11-13T10:26:13,066 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for d48059178f9b93b9bd0bb136e0cb43f3 in 132ms, sequenceid=26, compaction requested=false; wal=null 2024-11-13T10:26:13,067 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/recovered.edits/0000000000000000026 2024-11-13T10:26:13,068 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:13,068 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:13,069 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush 
descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-13T10:26:13,071 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:13,074 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/testReplayEditsAfterAbortingFlush/d48059178f9b93b9bd0bb136e0cb43f3/recovered.edits/26.seqid, newMaxSeqId=26, maxSeqId=1 2024-11-13T10:26:13,075 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d48059178f9b93b9bd0bb136e0cb43f3; next sequenceid=27; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70578057, jitterRate=0.05169500410556793}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-13T10:26:13,075 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d48059178f9b93b9bd0bb136e0cb43f3 2024-11-13T10:26:13,075 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d48059178f9b93b9bd0bb136e0cb43f3: Running coprocessor pre-open hook at 1731493572920Writing region info on filesystem at 1731493572920Initializing all the Stores at 1731493572921 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493572922 (+1 ms)Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493572922Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493572922Obtaining lock to block concurrent updates at 1731493572934 (+12 ms)Preparing flush snapshotting stores in d48059178f9b93b9bd0bb136e0cb43f3 at 1731493572934Finished memstore snapshotting testReplayEditsAfterAbortingFlush,,1731493563488.d48059178f9b93b9bd0bb136e0cb43f3., syncing WAL and waiting on mvcc, flushsize=dataSize=1190, getHeapSize=3440, getOffHeapSize=0, getCellsCount=20 at 1731493572935 (+1 ms)Flushing stores of testReplayEditsAfterAbortingFlush,,1731493563488.d48059178f9b93b9bd0bb136e0cb43f3. 
at 1731493572935Flushing d48059178f9b93b9bd0bb136e0cb43f3/a: creating writer at 1731493572935Flushing d48059178f9b93b9bd0bb136e0cb43f3/a: appending metadata at 1731493572956 (+21 ms)Flushing d48059178f9b93b9bd0bb136e0cb43f3/a: closing flushed file at 1731493572956Flushing d48059178f9b93b9bd0bb136e0cb43f3/b: creating writer at 1731493572973 (+17 ms)Flushing d48059178f9b93b9bd0bb136e0cb43f3/b: appending metadata at 1731493572992 (+19 ms)Flushing d48059178f9b93b9bd0bb136e0cb43f3/b: closing flushed file at 1731493572992Flushing d48059178f9b93b9bd0bb136e0cb43f3/c: creating writer at 1731493573007 (+15 ms)Flushing d48059178f9b93b9bd0bb136e0cb43f3/c: appending metadata at 1731493573028 (+21 ms)Flushing d48059178f9b93b9bd0bb136e0cb43f3/c: closing flushed file at 1731493573029 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d9243b9: reopening flushed file at 1731493573044 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c2f19c4: reopening flushed file at 1731493573051 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@226f3265: reopening flushed file at 1731493573059 (+8 ms)Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for d48059178f9b93b9bd0bb136e0cb43f3 in 132ms, sequenceid=26, compaction requested=false; wal=null at 1731493573066 (+7 ms)Cleaning up temporary data from old regions at 1731493573068 (+2 ms)Running coprocessor post-open hooks at 1731493573075 (+7 ms)Region opened successfully at 1731493573075 2024-11-13T10:26:13,103 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterAbortingFlush Thread=409 (was 411), OpenFileDescriptor=1164 (was 1104) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=597 (was 563) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=313 (was 375) 2024-11-13T10:26:13,103 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1164 is superior to 1024 2024-11-13T10:26:13,122 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testDatalossWhenInputError Thread=409, OpenFileDescriptor=1164, MaxFileDescriptor=1048576, SystemLoadAverage=597, ProcessCount=11, AvailableMemoryMB=312 2024-11-13T10:26:13,122 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1164 is superior to 1024 2024-11-13T10:26:13,138 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:26:13,139 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:26:13,140 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T10:26:13,142 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-80989423, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/hregion-80989423, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:26:13,156 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-80989423/hregion-80989423.1731493573143, exclude list is [], retry=0 2024-11-13T10:26:13,159 DEBUG [AsyncFSWAL-24-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:26:13,159 DEBUG [AsyncFSWAL-24-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:26:13,159 DEBUG [AsyncFSWAL-24-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:26:13,162 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-80989423/hregion-80989423.1731493573143 2024-11-13T10:26:13,163 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:45409:45409)] 2024-11-13T10:26:13,163 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 5363d25c8d3503512d5ad8cf32d185cc, NAME => 'testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testDatalossWhenInputError', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41249/hbase 2024-11-13T10:26:13,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:44787 is added to blk_1073741915_1093 (size=61) 2024-11-13T10:26:13,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741915_1093 (size=61) 2024-11-13T10:26:13,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741915_1093 (size=61) 2024-11-13T10:26:13,177 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:13,179 INFO [StoreOpener-5363d25c8d3503512d5ad8cf32d185cc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,181 INFO [StoreOpener-5363d25c8d3503512d5ad8cf32d185cc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5363d25c8d3503512d5ad8cf32d185cc columnFamilyName a 2024-11-13T10:26:13,181 DEBUG [StoreOpener-5363d25c8d3503512d5ad8cf32d185cc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:13,181 INFO [StoreOpener-5363d25c8d3503512d5ad8cf32d185cc-1 {}] regionserver.HStore(327): Store=5363d25c8d3503512d5ad8cf32d185cc/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:13,181 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,182 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,183 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,183 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,183 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,185 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,187 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T10:26:13,188 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 5363d25c8d3503512d5ad8cf32d185cc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70082192, jitterRate=0.044306039810180664}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T10:26:13,189 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 5363d25c8d3503512d5ad8cf32d185cc: Writing region info on filesystem at 1731493573178Initializing all the Stores at 1731493573178Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493573179 (+1 ms)Cleaning up temporary data from old regions at 1731493573183 (+4 ms)Region opened successfully at 1731493573189 (+6 ms) 2024-11-13T10:26:13,189 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 5363d25c8d3503512d5ad8cf32d185cc, disabling compactions & flushes 2024-11-13T10:26:13,189 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc. 2024-11-13T10:26:13,189 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc. 2024-11-13T10:26:13,189 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc. after waiting 0 ms 2024-11-13T10:26:13,189 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc. 2024-11-13T10:26:13,192 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc. 
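Here the testDatalossWhenInputError region is created with a single column family 'a', a 1.seqid marker is written, and the region is opened and immediately closed again before any data goes in. The test drives HRegion and its WAL directly, as the log shows, but the same table shape, and the ten edits under qualifiers like a:x0 that are replayed further down, would look roughly like this through the public client API (a hedged, client-level sketch; the configuration source and the value bytes are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAndLoadSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // would point at the mini cluster
            TableName tn = TableName.valueOf("testDatalossWhenInputError");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Single column family 'a', matching the descriptor in the log.
                TableDescriptor td = TableDescriptorBuilder.newBuilder(tn)
                        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("a"))
                        .build();
                admin.createTable(td);
                try (Table table = conn.getTable(tn)) {
                    // Ten small edits, similar in shape to the rows the splitter recovers later.
                    for (int i = 0; i < 10; i++) {
                        Put p = new Put(Bytes.toBytes("testDatalossWhenInputError"));
                        p.addColumn(Bytes.toBytes("a"), Bytes.toBytes("x" + i),
                                Bytes.toBytes("v" + i));
                        table.put(p);
                    }
                }
            }
        }
    }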
2024-11-13T10:26:13,192 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 5363d25c8d3503512d5ad8cf32d185cc: Waiting for close lock at 1731493573189Disabling compacts and flushes for region at 1731493573189Disabling writes for close at 1731493573189Writing region close event to WAL at 1731493573192 (+3 ms)Closed at 1731493573192 2024-11-13T10:26:13,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741914_1092 (size=95) 2024-11-13T10:26:13,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741914_1092 (size=95) 2024-11-13T10:26:13,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741914_1092 (size=95) 2024-11-13T10:26:13,202 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-13T10:26:13,202 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-80989423:(num 1731493573143) 2024-11-13T10:26:13,203 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-13T10:26:13,205 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:26:13,229 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136/wal.1731493573205, exclude list is [], retry=0 2024-11-13T10:26:13,231 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:26:13,232 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:26:13,232 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:26:13,236 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136/wal.1731493573205 2024-11-13T10:26:13,237 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:35745:35745)] 2024-11-13T10:26:13,237 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 5363d25c8d3503512d5ad8cf32d185cc, NAME => 'testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:26:13,237 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:13,237 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,237 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,239 INFO [StoreOpener-5363d25c8d3503512d5ad8cf32d185cc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,240 INFO [StoreOpener-5363d25c8d3503512d5ad8cf32d185cc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5363d25c8d3503512d5ad8cf32d185cc columnFamilyName a 2024-11-13T10:26:13,240 DEBUG [StoreOpener-5363d25c8d3503512d5ad8cf32d185cc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:13,240 INFO [StoreOpener-5363d25c8d3503512d5ad8cf32d185cc-1 {}] regionserver.HStore(327): Store=5363d25c8d3503512d5ad8cf32d185cc/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:13,241 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,241 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,242 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,243 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,243 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,245 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,246 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 5363d25c8d3503512d5ad8cf32d185cc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70367225, jitterRate=0.04855336248874664}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T10:26:13,246 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 
5363d25c8d3503512d5ad8cf32d185cc: Writing region info on filesystem at 1731493573237Initializing all the Stores at 1731493573238 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493573239 (+1 ms)Cleaning up temporary data from old regions at 1731493573243 (+4 ms)Region opened successfully at 1731493573246 (+3 ms) 2024-11-13T10:26:13,255 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 5363d25c8d3503512d5ad8cf32d185cc, disabling compactions & flushes 2024-11-13T10:26:13,255 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc. 2024-11-13T10:26:13,255 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc. 2024-11-13T10:26:13,255 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc. after waiting 0 ms 2024-11-13T10:26:13,255 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc. 2024-11-13T10:26:13,256 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 750 in region testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc. 2024-11-13T10:26:13,256 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc. 
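At this point the region has been closed with 750 B still un-flushed in the memstore (the ERROR entry above): the only durable copy of those edits is the WAL. The entries that follow recover the lease on wal.1731493573205 and split it, writing the edits to 0000000000000000003-wal.1731493573205.temp and renaming that to recovered.edits/0000000000000000012 once the writer is closed. A rough sketch of that write-to-temp-then-rename convention using the Hadoop FileSystem API (the path layout is taken from the log; the serialization step is elided and the helper itself is hypothetical, not the real WALSplitter):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RecoveredEditsSinkSketch {
        // Writes recovered edits to a temp file named after the first sequence id,
        // then renames it to the final name carrying the max sequence id, so a
        // reader listing recovered.edits never sees a partially written file.
        static Path commitRecoveredEdits(Configuration conf, Path regionDir, String walName,
                                         long firstSeqId, long maxSeqId) throws IOException {
            FileSystem fs = FileSystem.get(conf);
            Path editsDir = new Path(regionDir, "recovered.edits");
            Path temp = new Path(editsDir, String.format("%019d-%s.temp", firstSeqId, walName));
            try (FSDataOutputStream out = fs.create(temp, true)) {
                // ... serialize the filtered WAL entries here (elided in this sketch) ...
                out.hflush();
            }
            Path finalPath = new Path(editsDir, String.format("%019d", maxSeqId));
            if (!fs.rename(temp, finalPath)) {
                throw new IOException("rename failed: " + temp + " -> " + finalPath);
            }
            return finalPath;   // e.g. .../recovered.edits/0000000000000000012
        }
    }

The rename is what makes the edits visible for replay; until then the split output stays under a .temp name and is ignored by region open.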
2024-11-13T10:26:13,256 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 5363d25c8d3503512d5ad8cf32d185cc: Waiting for close lock at 1731493573255Disabling compacts and flushes for region at 1731493573255Disabling writes for close at 1731493573255Writing region close event to WAL at 1731493573256 (+1 ms)Closed at 1731493573256 2024-11-13T10:26:13,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741916_1094 (size=1050) 2024-11-13T10:26:13,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741916_1094 (size=1050) 2024-11-13T10:26:13,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741916_1094 (size=1050) 2024-11-13T10:26:13,259 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136/wal.1731493573205 not finished, retry = 0 2024-11-13T10:26:13,376 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:41249/hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136/wal.1731493573205, size=1.0 K (1050bytes) 2024-11-13T10:26:13,376 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41249/hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136/wal.1731493573205 2024-11-13T10:26:13,377 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41249/hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136/wal.1731493573205 after 1ms 2024-11-13T10:26:13,379 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136/wal.1731493573205: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:26:13,379 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:41249/hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136/wal.1731493573205 took 3ms 2024-11-13T10:26:13,382 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:41249/hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136/wal.1731493573205 so closing down 2024-11-13T10:26:13,382 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-13T10:26:13,383 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1731493573205.temp 2024-11-13T10:26:13,385 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/recovered.edits/0000000000000000003-wal.1731493573205.temp 2024-11-13T10:26:13,385 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-13T10:26:13,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741917_1095 (size=1050) 2024-11-13T10:26:13,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741917_1095 
(size=1050) 2024-11-13T10:26:13,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741917_1095 (size=1050) 2024-11-13T10:26:13,393 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/recovered.edits/0000000000000000003-wal.1731493573205.temp (wrote 10 edits, skipped 0 edits in 0 ms) 2024-11-13T10:26:13,394 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/recovered.edits/0000000000000000003-wal.1731493573205.temp to hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/recovered.edits/0000000000000000012 2024-11-13T10:26:13,395 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 10 edits across 1 Regions in 14 ms; skipped=0; WAL=hdfs://localhost:41249/hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136/wal.1731493573205, size=1.0 K, length=1050, corrupted=false, cancelled=false 2024-11-13T10:26:13,395 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:41249/hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136/wal.1731493573205, journal: Splitting hdfs://localhost:41249/hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136/wal.1731493573205, size=1.0 K (1050bytes) at 1731493573376Finishing writing output for hdfs://localhost:41249/hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136/wal.1731493573205 so closing down at 1731493573382 (+6 ms)Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/recovered.edits/0000000000000000003-wal.1731493573205.temp at 1731493573385 (+3 ms)3 split writer threads finished at 1731493573385Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/recovered.edits/0000000000000000003-wal.1731493573205.temp (wrote 10 edits, skipped 0 edits in 0 ms) at 1731493573393 (+8 ms)Rename recovered edits hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/recovered.edits/0000000000000000003-wal.1731493573205.temp to hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/recovered.edits/0000000000000000012 at 1731493573394 (+1 ms)Processed 10 edits across 1 Regions in 14 ms; skipped=0; WAL=hdfs://localhost:41249/hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136/wal.1731493573205, size=1.0 K, length=1050, corrupted=false, cancelled=false at 1731493573395 (+1 ms) 2024-11-13T10:26:13,396 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:41249/hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136/wal.1731493573205 to hdfs://localhost:41249/hbase/oldWALs/wal.1731493573205 2024-11-13T10:26:13,397 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/recovered.edits/0000000000000000012 2024-11-13T10:26:13,401 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for 
hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/recovered.edits/0000000000000000012: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:26:13,747 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-13T10:26:13,750 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:26:13,771 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136/wal.1731493573750, exclude list is [], retry=0 2024-11-13T10:26:13,774 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:26:13,775 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:26:13,775 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:26:13,777 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136/wal.1731493573750 2024-11-13T10:26:13,777 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:42913:42913)] 2024-11-13T10:26:13,777 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 5363d25c8d3503512d5ad8cf32d185cc, NAME => 'testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:26:13,778 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:13,778 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,778 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,782 INFO [StoreOpener-5363d25c8d3503512d5ad8cf32d185cc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,784 INFO [StoreOpener-5363d25c8d3503512d5ad8cf32d185cc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5363d25c8d3503512d5ad8cf32d185cc columnFamilyName a 2024-11-13T10:26:13,784 DEBUG [StoreOpener-5363d25c8d3503512d5ad8cf32d185cc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:13,785 INFO [StoreOpener-5363d25c8d3503512d5ad8cf32d185cc-1 {}] regionserver.HStore(327): Store=5363d25c8d3503512d5ad8cf32d185cc/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:13,785 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,786 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,788 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,789 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/recovered.edits/0000000000000000012 2024-11-13T10:26:13,791 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/recovered.edits/0000000000000000012: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:26:13,792 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 0, firstSequenceIdInLog=3, maxSequenceIdInLog=12, path=hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/recovered.edits/0000000000000000012 2024-11-13T10:26:13,793 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 5363d25c8d3503512d5ad8cf32d185cc 1/1 column families, dataSize=750 B heapSize=1.73 KB 2024-11-13T10:26:13,813 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/.tmp/a/bad0324f8b7744cea116d294ec24092d is 79, key is testDatalossWhenInputError/a:x0/1731493573246/Put/seqid=0 2024-11-13T10:26:13,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741919_1097 (size=5808) 2024-11-13T10:26:13,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741919_1097 (size=5808) 2024-11-13T10:26:13,821 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741919_1097 (size=5808) 2024-11-13T10:26:13,823 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=750 B at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/.tmp/a/bad0324f8b7744cea116d294ec24092d 2024-11-13T10:26:13,836 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/.tmp/a/bad0324f8b7744cea116d294ec24092d as hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/a/bad0324f8b7744cea116d294ec24092d 2024-11-13T10:26:13,847 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/a/bad0324f8b7744cea116d294ec24092d, entries=10, sequenceid=12, filesize=5.7 K 2024-11-13T10:26:13,847 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 5363d25c8d3503512d5ad8cf32d185cc in 55ms, sequenceid=12, compaction requested=false; wal=null 2024-11-13T10:26:13,848 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/recovered.edits/0000000000000000012 2024-11-13T10:26:13,849 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,849 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,852 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,855 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=1 2024-11-13T10:26:13,856 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 5363d25c8d3503512d5ad8cf32d185cc; next sequenceid=13; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64302991, jitterRate=-0.041810765862464905}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T10:26:13,856 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 5363d25c8d3503512d5ad8cf32d185cc: Writing region info on filesystem at 1731493573778Initializing all the Stores at 1731493573781 (+3 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493573781Obtaining lock to block concurrent updates at 1731493573793 (+12 ms)Preparing flush snapshotting stores in 5363d25c8d3503512d5ad8cf32d185cc at 1731493573793Finished memstore snapshotting testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc., syncing WAL and waiting on mvcc, flushsize=dataSize=750, 
getHeapSize=1760, getOffHeapSize=0, getCellsCount=10 at 1731493573793Flushing stores of testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc. at 1731493573793Flushing 5363d25c8d3503512d5ad8cf32d185cc/a: creating writer at 1731493573793Flushing 5363d25c8d3503512d5ad8cf32d185cc/a: appending metadata at 1731493573812 (+19 ms)Flushing 5363d25c8d3503512d5ad8cf32d185cc/a: closing flushed file at 1731493573812Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3dbbbd73: reopening flushed file at 1731493573834 (+22 ms)Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 5363d25c8d3503512d5ad8cf32d185cc in 55ms, sequenceid=12, compaction requested=false; wal=null at 1731493573847 (+13 ms)Cleaning up temporary data from old regions at 1731493573849 (+2 ms)Region opened successfully at 1731493573856 (+7 ms) 2024-11-13T10:26:13,859 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 5363d25c8d3503512d5ad8cf32d185cc, NAME => 'testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:26:13,859 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1731493573138.5363d25c8d3503512d5ad8cf32d185cc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:13,860 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,860 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,861 INFO [StoreOpener-5363d25c8d3503512d5ad8cf32d185cc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,862 INFO [StoreOpener-5363d25c8d3503512d5ad8cf32d185cc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5363d25c8d3503512d5ad8cf32d185cc columnFamilyName a 2024-11-13T10:26:13,862 DEBUG [StoreOpener-5363d25c8d3503512d5ad8cf32d185cc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:13,868 DEBUG [StoreOpener-5363d25c8d3503512d5ad8cf32d185cc-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/a/bad0324f8b7744cea116d294ec24092d 2024-11-13T10:26:13,868 INFO [StoreOpener-5363d25c8d3503512d5ad8cf32d185cc-1 {}] regionserver.HStore(327): Store=5363d25c8d3503512d5ad8cf32d185cc/a, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:13,868 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,869 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,870 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,870 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,870 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,872 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 5363d25c8d3503512d5ad8cf32d185cc 2024-11-13T10:26:13,874 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/testDatalossWhenInputError/5363d25c8d3503512d5ad8cf32d185cc/recovered.edits/13.seqid, newMaxSeqId=13, maxSeqId=12 2024-11-13T10:26:13,875 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 5363d25c8d3503512d5ad8cf32d185cc; next sequenceid=14; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74832074, jitterRate=0.11508479714393616}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T10:26:13,876 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 5363d25c8d3503512d5ad8cf32d185cc: Writing region info on filesystem at 1731493573860Initializing all the Stores at 1731493573861 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493573861Cleaning up temporary data from old regions at 1731493573870 (+9 ms)Region opened successfully at 1731493573876 (+6 ms) 2024-11-13T10:26:13,893 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testDatalossWhenInputError Thread=423 (was 409) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:55196 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-357896810-172.17.0.2-1731493536919:blk_1073741918_1096, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:52306 [Waiting for operation #17] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:50294 [Waiting for operation #12] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:50366 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741918_1096] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-357896810-172.17.0.2-1731493536919:blk_1073741918_1096, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-357896810-172.17.0.2-1731493536919:blk_1073741918_1096, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:52368 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741918_1096] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:55270 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741918_1096] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1250 (was 1164) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=597 (was 597), ProcessCount=11 (was 11), AvailableMemoryMB=303 (was 312) 2024-11-13T10:26:13,893 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1250 is superior to 1024 2024-11-13T10:26:13,905 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testCompactedBulkLoadedFiles Thread=423, OpenFileDescriptor=1250, MaxFileDescriptor=1048576, SystemLoadAverage=597, ProcessCount=11, AvailableMemoryMB=302 2024-11-13T10:26:13,905 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1250 is superior to 1024 2024-11-13T10:26:13,920 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:26:13,922 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:26:13,922 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T10:26:13,925 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-86340555, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/hregion-86340555, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:26:13,939 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-86340555/hregion-86340555.1731493573925, exclude list is [], retry=0 2024-11-13T10:26:13,942 DEBUG [AsyncFSWAL-26-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:26:13,942 DEBUG [AsyncFSWAL-26-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:26:13,943 DEBUG [AsyncFSWAL-26-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:26:13,945 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-86340555/hregion-86340555.1731493573925 2024-11-13T10:26:13,945 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:42913:42913)] 2024-11-13T10:26:13,945 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 7c3ef0f1838c6889ae08eeb46da2b2ca, NAME => 'testCompactedBulkLoadedFiles,,1731493573921.7c3ef0f1838c6889ae08eeb46da2b2ca.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testCompactedBulkLoadedFiles', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41249/hbase 2024-11-13T10:26:13,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741921_1099 (size=63) 2024-11-13T10:26:13,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741921_1099 (size=63) 2024-11-13T10:26:13,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741921_1099 (size=63) 2024-11-13T10:26:13,958 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1731493573921.7c3ef0f1838c6889ae08eeb46da2b2ca.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:13,960 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:13,961 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7c3ef0f1838c6889ae08eeb46da2b2ca columnFamilyName a 2024-11-13T10:26:13,962 DEBUG [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:13,962 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.HStore(327): Store=7c3ef0f1838c6889ae08eeb46da2b2ca/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:13,962 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:13,964 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7c3ef0f1838c6889ae08eeb46da2b2ca columnFamilyName b 2024-11-13T10:26:13,964 DEBUG [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:13,965 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.HStore(327): Store=7c3ef0f1838c6889ae08eeb46da2b2ca/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:13,965 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:13,966 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7c3ef0f1838c6889ae08eeb46da2b2ca columnFamilyName c 2024-11-13T10:26:13,966 DEBUG [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:13,967 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.HStore(327): Store=7c3ef0f1838c6889ae08eeb46da2b2ca/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:13,967 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:13,968 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:13,968 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:13,969 DEBUG [Time-limited 
test {}] regionserver.HRegion(1048): stopping wal replay for 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:13,969 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:13,970 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-13T10:26:13,972 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:13,974 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T10:26:13,975 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 7c3ef0f1838c6889ae08eeb46da2b2ca; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65268446, jitterRate=-0.027424365282058716}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-13T10:26:13,976 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 7c3ef0f1838c6889ae08eeb46da2b2ca: Writing region info on filesystem at 1731493573959Initializing all the Stores at 1731493573960 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493573960Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493573960Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493573960Cleaning up temporary data from old regions at 1731493573969 (+9 ms)Region opened successfully at 1731493573976 (+7 ms) 2024-11-13T10:26:13,976 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 7c3ef0f1838c6889ae08eeb46da2b2ca, disabling compactions & flushes 2024-11-13T10:26:13,976 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1731493573921.7c3ef0f1838c6889ae08eeb46da2b2ca. 2024-11-13T10:26:13,976 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1731493573921.7c3ef0f1838c6889ae08eeb46da2b2ca. 2024-11-13T10:26:13,976 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1731493573921.7c3ef0f1838c6889ae08eeb46da2b2ca. 
after waiting 0 ms 2024-11-13T10:26:13,976 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1731493573921.7c3ef0f1838c6889ae08eeb46da2b2ca. 2024-11-13T10:26:13,976 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1731493573921.7c3ef0f1838c6889ae08eeb46da2b2ca. 2024-11-13T10:26:13,977 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 7c3ef0f1838c6889ae08eeb46da2b2ca: Waiting for close lock at 1731493573976Disabling compacts and flushes for region at 1731493573976Disabling writes for close at 1731493573976Writing region close event to WAL at 1731493573976Closed at 1731493573976 2024-11-13T10:26:13,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741920_1098 (size=95) 2024-11-13T10:26:13,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741920_1098 (size=95) 2024-11-13T10:26:13,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741920_1098 (size=95) 2024-11-13T10:26:13,986 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-13T10:26:13,986 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-86340555:(num 1731493573925) 2024-11-13T10:26:13,986 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-13T10:26:13,988 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:26:14,001 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493573988, exclude list is [], retry=0 2024-11-13T10:26:14,004 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:26:14,004 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:26:14,005 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:26:14,007 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493573988 2024-11-13T10:26:14,007 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:45409:45409)] 2024-11-13T10:26:14,007 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: 
{ENCODED => 7c3ef0f1838c6889ae08eeb46da2b2ca, NAME => 'testCompactedBulkLoadedFiles,,1731493573921.7c3ef0f1838c6889ae08eeb46da2b2ca.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:26:14,007 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1731493573921.7c3ef0f1838c6889ae08eeb46da2b2ca.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:14,007 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:14,007 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:14,009 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:14,009 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7c3ef0f1838c6889ae08eeb46da2b2ca columnFamilyName a 2024-11-13T10:26:14,010 DEBUG [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:14,010 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.HStore(327): Store=7c3ef0f1838c6889ae08eeb46da2b2ca/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:14,010 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:14,011 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
7c3ef0f1838c6889ae08eeb46da2b2ca columnFamilyName b 2024-11-13T10:26:14,011 DEBUG [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:14,011 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.HStore(327): Store=7c3ef0f1838c6889ae08eeb46da2b2ca/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:14,012 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:14,012 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7c3ef0f1838c6889ae08eeb46da2b2ca columnFamilyName c 2024-11-13T10:26:14,013 DEBUG [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:14,013 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.HStore(327): Store=7c3ef0f1838c6889ae08eeb46da2b2ca/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:14,013 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:14,014 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:14,015 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:14,016 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:14,016 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:14,017 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
2024-11-13T10:26:14,018 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:14,019 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 7c3ef0f1838c6889ae08eeb46da2b2ca; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60127407, jitterRate=-0.10403181612491608}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-13T10:26:14,020 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 7c3ef0f1838c6889ae08eeb46da2b2ca: Writing region info on filesystem at 1731493574007Initializing all the Stores at 1731493574008 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493574008Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493574009 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493574009Cleaning up temporary data from old regions at 1731493574016 (+7 ms)Region opened successfully at 1731493574020 (+4 ms) 2024-11-13T10:26:14,025 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/testCompactedBulkLoadedFiles/hfile0 is 32, key is 000/a:a/1731493574024/Put/seqid=0 2024-11-13T10:26:14,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741923_1101 (size=4875) 2024-11-13T10:26:14,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741923_1101 (size=4875) 2024-11-13T10:26:14,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741923_1101 (size=4875) 2024-11-13T10:26:14,041 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/testCompactedBulkLoadedFiles/hfile1 is 32, key is 100/a:a/1731493574041/Put/seqid=0 2024-11-13T10:26:14,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741924_1102 (size=4875) 2024-11-13T10:26:14,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741924_1102 (size=4875) 2024-11-13T10:26:14,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741924_1102 (size=4875) 2024-11-13T10:26:14,052 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len 
of the biggest cell in hdfs://localhost:41249/hbase/testCompactedBulkLoadedFiles/hfile2 is 32, key is 200/a:a/1731493574051/Put/seqid=0 2024-11-13T10:26:14,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741925_1103 (size=4875) 2024-11-13T10:26:14,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741925_1103 (size=4875) 2024-11-13T10:26:14,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741925_1103 (size=4875) 2024-11-13T10:26:14,060 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:41249/hbase/testCompactedBulkLoadedFiles/hfile0 for inclusion in 7c3ef0f1838c6889ae08eeb46da2b2ca/a 2024-11-13T10:26:14,065 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=000 last=050 2024-11-13T10:26:14,065 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-13T10:26:14,065 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:41249/hbase/testCompactedBulkLoadedFiles/hfile1 for inclusion in 7c3ef0f1838c6889ae08eeb46da2b2ca/a 2024-11-13T10:26:14,069 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=100 last=150 2024-11-13T10:26:14,069 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-13T10:26:14,069 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:41249/hbase/testCompactedBulkLoadedFiles/hfile2 for inclusion in 7c3ef0f1838c6889ae08eeb46da2b2ca/a 2024-11-13T10:26:14,073 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=200 last=250 2024-11-13T10:26:14,073 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-13T10:26:14,073 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 7c3ef0f1838c6889ae08eeb46da2b2ca 3/3 column families, dataSize=51 B heapSize=896 B 2024-11-13T10:26:14,090 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/.tmp/a/669e307373274695b43c0ca3f421b4b9 is 55, key is testCompactedBulkLoadedFiles/a:a/1731493574020/Put/seqid=0 2024-11-13T10:26:14,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741926_1104 (size=5107) 2024-11-13T10:26:14,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741926_1104 (size=5107) 2024-11-13T10:26:14,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741926_1104 (size=5107) 2024-11-13T10:26:14,098 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51 B at sequenceid=4 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/.tmp/a/669e307373274695b43c0ca3f421b4b9 2024-11-13T10:26:14,104 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/.tmp/a/669e307373274695b43c0ca3f421b4b9 as 
hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/669e307373274695b43c0ca3f421b4b9 2024-11-13T10:26:14,110 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/669e307373274695b43c0ca3f421b4b9, entries=1, sequenceid=4, filesize=5.0 K 2024-11-13T10:26:14,111 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~51 B/51, heapSize ~368 B/368, currentSize=0 B/0 for 7c3ef0f1838c6889ae08eeb46da2b2ca in 38ms, sequenceid=4, compaction requested=false 2024-11-13T10:26:14,111 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 7c3ef0f1838c6889ae08eeb46da2b2ca: 2024-11-13T10:26:14,113 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/testCompactedBulkLoadedFiles/hfile0 as hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/1e6dcea501da47b58b0c68e217317be4_SeqId_4_ 2024-11-13T10:26:14,114 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/testCompactedBulkLoadedFiles/hfile1 as hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/9084a6b47072428bb223d85af70cee75_SeqId_4_ 2024-11-13T10:26:14,115 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/testCompactedBulkLoadedFiles/hfile2 as hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/f54a1103b9184255955a7d9e2843ddd4_SeqId_4_ 2024-11-13T10:26:14,116 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:41249/hbase/testCompactedBulkLoadedFiles/hfile0 into 7c3ef0f1838c6889ae08eeb46da2b2ca/a as hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/1e6dcea501da47b58b0c68e217317be4_SeqId_4_ - updating store file list. 2024-11-13T10:26:14,121 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 1e6dcea501da47b58b0c68e217317be4_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-13T10:26:14,121 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/1e6dcea501da47b58b0c68e217317be4_SeqId_4_ into 7c3ef0f1838c6889ae08eeb46da2b2ca/a 2024-11-13T10:26:14,121 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:41249/hbase/testCompactedBulkLoadedFiles/hfile0 into 7c3ef0f1838c6889ae08eeb46da2b2ca/a (new location: hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/1e6dcea501da47b58b0c68e217317be4_SeqId_4_) 2024-11-13T10:26:14,122 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:41249/hbase/testCompactedBulkLoadedFiles/hfile1 into 7c3ef0f1838c6889ae08eeb46da2b2ca/a as hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/9084a6b47072428bb223d85af70cee75_SeqId_4_ - updating store file list. 
2024-11-13T10:26:14,127 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 9084a6b47072428bb223d85af70cee75_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-13T10:26:14,127 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/9084a6b47072428bb223d85af70cee75_SeqId_4_ into 7c3ef0f1838c6889ae08eeb46da2b2ca/a 2024-11-13T10:26:14,127 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:41249/hbase/testCompactedBulkLoadedFiles/hfile1 into 7c3ef0f1838c6889ae08eeb46da2b2ca/a (new location: hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/9084a6b47072428bb223d85af70cee75_SeqId_4_) 2024-11-13T10:26:14,128 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:41249/hbase/testCompactedBulkLoadedFiles/hfile2 into 7c3ef0f1838c6889ae08eeb46da2b2ca/a as hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/f54a1103b9184255955a7d9e2843ddd4_SeqId_4_ - updating store file list. 2024-11-13T10:26:14,133 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for f54a1103b9184255955a7d9e2843ddd4_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-13T10:26:14,133 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/f54a1103b9184255955a7d9e2843ddd4_SeqId_4_ into 7c3ef0f1838c6889ae08eeb46da2b2ca/a 2024-11-13T10:26:14,133 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:41249/hbase/testCompactedBulkLoadedFiles/hfile2 into 7c3ef0f1838c6889ae08eeb46da2b2ca/a (new location: hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/f54a1103b9184255955a7d9e2843ddd4_SeqId_4_) 2024-11-13T10:26:14,143 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-13T10:26:14,143 DEBUG [Time-limited test {}] regionserver.HStore(1541): 7c3ef0f1838c6889ae08eeb46da2b2ca/a is initiating major compaction (all files) 2024-11-13T10:26:14,143 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 7c3ef0f1838c6889ae08eeb46da2b2ca/a in testCompactedBulkLoadedFiles,,1731493573921.7c3ef0f1838c6889ae08eeb46da2b2ca. 
2024-11-13T10:26:14,144 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/669e307373274695b43c0ca3f421b4b9, hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/1e6dcea501da47b58b0c68e217317be4_SeqId_4_, hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/9084a6b47072428bb223d85af70cee75_SeqId_4_, hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/f54a1103b9184255955a7d9e2843ddd4_SeqId_4_] into tmpdir=hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/.tmp, totalSize=19.3 K 2024-11-13T10:26:14,144 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 669e307373274695b43c0ca3f421b4b9, keycount=1, bloomtype=ROW, size=5.0 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=1731493574020 2024-11-13T10:26:14,145 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 1e6dcea501da47b58b0c68e217317be4_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-11-13T10:26:14,145 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 9084a6b47072428bb223d85af70cee75_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-11-13T10:26:14,146 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting f54a1103b9184255955a7d9e2843ddd4_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-11-13T10:26:14,161 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/.tmp/a/01f0c69123ab4c2e832069d6c2383ab2 is 55, key is testCompactedBulkLoadedFiles/a:a/1731493574020/Put/seqid=0 2024-11-13T10:26:14,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741927_1105 (size=6154) 2024-11-13T10:26:14,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741927_1105 (size=6154) 2024-11-13T10:26:14,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741927_1105 (size=6154) 2024-11-13T10:26:14,177 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/.tmp/a/01f0c69123ab4c2e832069d6c2383ab2 as hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/01f0c69123ab4c2e832069d6c2383ab2 2024-11-13T10:26:14,183 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 4 (all) file(s) in 7c3ef0f1838c6889ae08eeb46da2b2ca/a of 7c3ef0f1838c6889ae08eeb46da2b2ca into 01f0c69123ab4c2e832069d6c2383ab2(size=6.0 K), total size for store is 6.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-13T10:26:14,183 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 7c3ef0f1838c6889ae08eeb46da2b2ca: 2024-11-13T10:26:14,183 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking 2024-11-13T10:26:14,183 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking 2024-11-13T10:26:14,216 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:41249/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493573988, size=0 (0bytes) 2024-11-13T10:26:14,216 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:41249/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493573988 might be still open, length is 0 2024-11-13T10:26:14,216 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41249/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493573988 2024-11-13T10:26:14,216 WARN [IPC Server handler 2 on default port 41249 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493573988 has not been closed. Lease recovery is in progress. RecoveryId = 1106 for block blk_1073741922_1100 2024-11-13T10:26:14,217 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41249/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493573988 after 1ms 2024-11-13T10:26:15,260 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:52390 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741922_1100] {}] datanode.DataXceiver(331): 127.0.0.1:44787:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52390 dst: /127.0.0.1:44787 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:44787 remote=/127.0.0.1:52390]. Total timeout mills is 60000, 58924 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:26:15,260 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:50410 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741922_1100] {}] datanode.DataXceiver(331): 127.0.0.1:38649:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50410 dst: /127.0.0.1:38649 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:26:15,260 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:55310 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741922_1100] {}] datanode.DataXceiver(331): 127.0.0.1:45097:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55310 dst: /127.0.0.1:45097 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:26:15,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741922_1106 (size=1176) 2024-11-13T10:26:15,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741922_1106 (size=1176) 2024-11-13T10:26:18,217 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41249/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493573988 after 4001ms 2024-11-13T10:26:18,221 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493573988: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:26:18,221 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:41249/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493573988 took 4005ms 2024-11-13T10:26:18,223 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:41249/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493573988; continuing. 
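Annotation: the four-second gap above ("Failed to recover lease, attempt=0 ... Recovered lease, attempt=1 ... after 4001ms") is the lease-recovery loop. The WAL file is still open for write, so util.RecoverLeaseFSUtils keeps asking the NameNode to release the writer's lease until the file's length becomes final and the splitter can open it. A minimal sketch of that polling pattern, built only on the public DistributedFileSystem.recoverLease() call; the class name, timeout and sleep interval below are illustrative assumptions, not the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Hedged sketch: poll recoverLease() until the NameNode reports the file closed.
public final class LeaseRecoverySketch {
  public static boolean recoverLease(Configuration conf, Path wal, long timeoutMs)
      throws Exception {
    FileSystem fs = wal.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // a local filesystem has no leases to recover
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      // The first call triggers internalReleaseLease on the NameNode
      // (the "Lease recovery is in progress" warning above); later calls just poll.
      if (dfs.recoverLease(wal)) {
        return true; // lease recovered, the reported file length is now final
      }
      Thread.sleep(1000L); // back off between attempts, as the utility in the log does
    }
    return false;
  }
}

recoverLease() returns false while recovery is still in progress (the "RecoveryId = 1106" message above) and true once the last block is finalized, which is why the splitter only reports "Open ... took 4005ms" after the second attempt.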
2024-11-13T10:26:18,223 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:41249/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493573988 so closing down 2024-11-13T10:26:18,223 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-13T10:26:18,225 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1731493573988.temp 2024-11-13T10:26:18,227 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/recovered.edits/0000000000000000003-wal.1731493573988.temp 2024-11-13T10:26:18,227 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-13T10:26:18,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741928_1107 (size=550) 2024-11-13T10:26:18,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741928_1107 (size=550) 2024-11-13T10:26:18,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741928_1107 (size=550) 2024-11-13T10:26:18,235 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/recovered.edits/0000000000000000003-wal.1731493573988.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-11-13T10:26:18,237 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/recovered.edits/0000000000000000003-wal.1731493573988.temp to hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/recovered.edits/0000000000000000008 2024-11-13T10:26:18,237 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 5 edits across 1 Regions in 15 ms; skipped=3; WAL=hdfs://localhost:41249/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493573988, size=0, length=0, corrupted=false, cancelled=false 2024-11-13T10:26:18,237 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:41249/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493573988, journal: Splitting hdfs://localhost:41249/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493573988, size=0 (0bytes) at 1731493574216Finishing writing output for hdfs://localhost:41249/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493573988 so closing down at 1731493578223 (+4007 ms)Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/recovered.edits/0000000000000000003-wal.1731493573988.temp at 1731493578227 (+4 ms)3 split writer threads finished at 1731493578227Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/recovered.edits/0000000000000000003-wal.1731493573988.temp (wrote 2 edits, skipped 0 
edits in 0 ms) at 1731493578235 (+8 ms)Rename recovered edits hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/recovered.edits/0000000000000000003-wal.1731493573988.temp to hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/recovered.edits/0000000000000000008 at 1731493578237 (+2 ms)Processed 5 edits across 1 Regions in 15 ms; skipped=3; WAL=hdfs://localhost:41249/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493573988, size=0, length=0, corrupted=false, cancelled=false at 1731493578237 2024-11-13T10:26:18,239 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:41249/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493573988 to hdfs://localhost:41249/hbase/oldWALs/wal.1731493573988 2024-11-13T10:26:18,240 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/recovered.edits/0000000000000000008 2024-11-13T10:26:18,240 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-13T10:26:18,242 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:26:18,255 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493578242, exclude list is [], retry=0 2024-11-13T10:26:18,258 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:26:18,258 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:26:18,258 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:26:18,260 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493578242 2024-11-13T10:26:18,261 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:35745:35745)] 2024-11-13T10:26:18,261 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 7c3ef0f1838c6889ae08eeb46da2b2ca, NAME => 'testCompactedBulkLoadedFiles,,1731493573921.7c3ef0f1838c6889ae08eeb46da2b2ca.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:26:18,261 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1731493573921.7c3ef0f1838c6889ae08eeb46da2b2ca.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; 
minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:18,261 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:18,261 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:18,262 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:18,263 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7c3ef0f1838c6889ae08eeb46da2b2ca columnFamilyName a 2024-11-13T10:26:18,263 DEBUG [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:18,270 DEBUG [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/01f0c69123ab4c2e832069d6c2383ab2 2024-11-13T10:26:18,274 DEBUG [StoreFileOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 1e6dcea501da47b58b0c68e217317be4_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-13T10:26:18,274 DEBUG [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/1e6dcea501da47b58b0c68e217317be4_SeqId_4_ 2024-11-13T10:26:18,277 DEBUG [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/669e307373274695b43c0ca3f421b4b9 2024-11-13T10:26:18,280 DEBUG [StoreFileOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 9084a6b47072428bb223d85af70cee75_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-13T10:26:18,280 DEBUG [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/9084a6b47072428bb223d85af70cee75_SeqId_4_ 2024-11-13T10:26:18,284 DEBUG [StoreFileOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for f54a1103b9184255955a7d9e2843ddd4_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-13T10:26:18,284 
DEBUG [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/f54a1103b9184255955a7d9e2843ddd4_SeqId_4_ 2024-11-13T10:26:18,284 WARN [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/1e6dcea501da47b58b0c68e217317be4_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@4ae6db7 2024-11-13T10:26:18,284 WARN [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/669e307373274695b43c0ca3f421b4b9 from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@4ae6db7 2024-11-13T10:26:18,284 WARN [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/9084a6b47072428bb223d85af70cee75_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@4ae6db7 2024-11-13T10:26:18,284 WARN [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/f54a1103b9184255955a7d9e2843ddd4_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@4ae6db7 2024-11-13T10:26:18,284 DEBUG [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.StoreEngine(327): Moving the files [hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/1e6dcea501da47b58b0c68e217317be4_SeqId_4_, hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/669e307373274695b43c0ca3f421b4b9, hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/9084a6b47072428bb223d85af70cee75_SeqId_4_, hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/f54a1103b9184255955a7d9e2843ddd4_SeqId_4_] to archive 2024-11-13T10:26:18,285 DEBUG [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
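Annotation: the StoreEngine warnings above drop the four input files of the replayed compaction, and HFileArchiver then moves each one under /hbase/archive, mirroring its path below the data directory. A rough sketch of that path mapping and same-filesystem rename, using only generic FileSystem calls; the helper names are made up for illustration, and the real HFileArchiver additionally handles name collisions, retries and directory archiving, which are omitted here.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hedged sketch of the archive move visible in the log:
//   /hbase/data/default/<table>/<region>/<cf>/<file>
//     -> /hbase/archive/data/default/<table>/<region>/<cf>/<file>
public final class ArchiveMoveSketch {
  public static Path archivePath(Path rootDir, Path storeFile) {
    // Strip the root dir prefix, then re-root the relative path under "archive".
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1);
    return new Path(new Path(rootDir, "archive"), relative);
  }

  public static void moveToArchive(Configuration conf, Path rootDir, Path storeFile)
      throws IOException {
    FileSystem fs = rootDir.getFileSystem(conf);
    Path target = archivePath(rootDir, storeFile);
    fs.mkdirs(target.getParent());        // create archive/.../<cf>/ if missing
    if (!fs.rename(storeFile, target)) {  // same-namespace move, no data copy
      throw new IOException("Failed to archive " + storeFile + " to " + target);
    }
  }
}

Because the move is a rename within the same HDFS namespace, no block data is copied; only the namespace entry changes, which is consistent with the archive steps in the log completing in a millisecond or two per file.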
2024-11-13T10:26:18,287 DEBUG [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/1e6dcea501da47b58b0c68e217317be4_SeqId_4_ to hdfs://localhost:41249/hbase/archive/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/1e6dcea501da47b58b0c68e217317be4_SeqId_4_ 2024-11-13T10:26:18,288 DEBUG [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/669e307373274695b43c0ca3f421b4b9 to hdfs://localhost:41249/hbase/archive/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/669e307373274695b43c0ca3f421b4b9 2024-11-13T10:26:18,289 DEBUG [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/9084a6b47072428bb223d85af70cee75_SeqId_4_ to hdfs://localhost:41249/hbase/archive/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/9084a6b47072428bb223d85af70cee75_SeqId_4_ 2024-11-13T10:26:18,290 DEBUG [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/f54a1103b9184255955a7d9e2843ddd4_SeqId_4_ to hdfs://localhost:41249/hbase/archive/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/a/f54a1103b9184255955a7d9e2843ddd4_SeqId_4_ 2024-11-13T10:26:18,291 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.HStore(327): Store=7c3ef0f1838c6889ae08eeb46da2b2ca/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:18,291 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:18,292 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7c3ef0f1838c6889ae08eeb46da2b2ca columnFamilyName b 2024-11-13T10:26:18,292 DEBUG [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:18,292 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] 
regionserver.HStore(327): Store=7c3ef0f1838c6889ae08eeb46da2b2ca/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:18,292 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:18,293 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7c3ef0f1838c6889ae08eeb46da2b2ca columnFamilyName c 2024-11-13T10:26:18,293 DEBUG [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:18,293 INFO [StoreOpener-7c3ef0f1838c6889ae08eeb46da2b2ca-1 {}] regionserver.HStore(327): Store=7c3ef0f1838c6889ae08eeb46da2b2ca/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:18,294 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:18,294 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:18,296 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:18,296 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/recovered.edits/0000000000000000008 2024-11-13T10:26:18,298 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/recovered.edits/0000000000000000008: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:26:18,299 DEBUG [Time-limited test {}] regionserver.HRegion(5836): 7c3ef0f1838c6889ae08eeb46da2b2ca : Replaying compaction marker table_name: "testCompactedBulkLoadedFiles" encoded_region_name: "7c3ef0f1838c6889ae08eeb46da2b2ca" family_name: "a" compaction_input: "669e307373274695b43c0ca3f421b4b9" compaction_input: "1e6dcea501da47b58b0c68e217317be4_SeqId_4_" compaction_input: "9084a6b47072428bb223d85af70cee75_SeqId_4_" compaction_input: 
"f54a1103b9184255955a7d9e2843ddd4_SeqId_4_" compaction_output: "01f0c69123ab4c2e832069d6c2383ab2" store_home_dir: "a" region_name: "testCompactedBulkLoadedFiles,,1731493573921.7c3ef0f1838c6889ae08eeb46da2b2ca." with seqId=9223372036854775807 and lastReplayedOpenRegionSeqId=-1 2024-11-13T10:26:18,300 DEBUG [Time-limited test {}] regionserver.HStore(1354): Completing compaction from the WAL marker 2024-11-13T10:26:18,300 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 0, skipped 2, firstSequenceIdInLog=3, maxSequenceIdInLog=8, path=hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/recovered.edits/0000000000000000008 2024-11-13T10:26:18,300 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/recovered.edits/0000000000000000008 2024-11-13T10:26:18,301 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:18,301 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:18,302 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-13T10:26:18,303 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 7c3ef0f1838c6889ae08eeb46da2b2ca 2024-11-13T10:26:18,305 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/testCompactedBulkLoadedFiles/7c3ef0f1838c6889ae08eeb46da2b2ca/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-13T10:26:18,306 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 7c3ef0f1838c6889ae08eeb46da2b2ca; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65170716, jitterRate=-0.028880655765533447}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-13T10:26:18,306 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 7c3ef0f1838c6889ae08eeb46da2b2ca: Writing region info on filesystem at 1731493578261Initializing all the Stores at 1731493578262 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493578262Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493578262Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => 
'65536 B (64KB)'} at 1731493578262Cleaning up temporary data from old regions at 1731493578301 (+39 ms)Region opened successfully at 1731493578306 (+5 ms) 2024-11-13T10:26:18,309 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 7c3ef0f1838c6889ae08eeb46da2b2ca, disabling compactions & flushes 2024-11-13T10:26:18,309 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1731493573921.7c3ef0f1838c6889ae08eeb46da2b2ca. 2024-11-13T10:26:18,309 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1731493573921.7c3ef0f1838c6889ae08eeb46da2b2ca. 2024-11-13T10:26:18,309 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1731493573921.7c3ef0f1838c6889ae08eeb46da2b2ca. after waiting 0 ms 2024-11-13T10:26:18,309 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1731493573921.7c3ef0f1838c6889ae08eeb46da2b2ca. 2024-11-13T10:26:18,310 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1731493573921.7c3ef0f1838c6889ae08eeb46da2b2ca. 2024-11-13T10:26:18,310 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 7c3ef0f1838c6889ae08eeb46da2b2ca: Waiting for close lock at 1731493578309Disabling compacts and flushes for region at 1731493578309Disabling writes for close at 1731493578309Writing region close event to WAL at 1731493578310 (+1 ms)Closed at 1731493578310 2024-11-13T10:26:18,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741929_1108 (size=95) 2024-11-13T10:26:18,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741929_1108 (size=95) 2024-11-13T10:26:18,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741929_1108 (size=95) 2024-11-13T10:26:18,316 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-13T10:26:18,316 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1731493578242) 2024-11-13T10:26:18,330 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testCompactedBulkLoadedFiles Thread=432 (was 423) Potentially hanging thread: IPC Client (1396088596) connection to localhost/127.0.0.1:41249 from jenkinstestCompactedBulkLoadedFiles java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: AsyncFSWAL-26-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1236241522_22 at /127.0.0.1:50468 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1236241522_22 at /127.0.0.1:55370 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1236241522_22 at /127.0.0.1:52476 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestCompactedBulkLoadedFiles@localhost:41249 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1330 (was 1250) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=589 (was 597), ProcessCount=11 (was 11), AvailableMemoryMB=270 (was 302) 2024-11-13T10:26:18,330 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1330 is superior to 1024 2024-11-13T10:26:18,347 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenViaHRegion Thread=432, OpenFileDescriptor=1330, MaxFileDescriptor=1048576, SystemLoadAverage=589, ProcessCount=11, AvailableMemoryMB=269 2024-11-13T10:26:18,347 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1330 is superior to 1024 2024-11-13T10:26:18,362 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-13T10:26:18,362 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:26:18,365 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T10:26:18,366 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T10:26:18,369 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-35878491, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/hregion-35878491, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:26:18,392 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-35878491/hregion-35878491.1731493578369, exclude list is [], retry=0 2024-11-13T10:26:18,395 DEBUG [AsyncFSWAL-28-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:26:18,395 DEBUG [AsyncFSWAL-28-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:26:18,396 DEBUG [AsyncFSWAL-28-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:26:18,398 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-35878491/hregion-35878491.1731493578369 2024-11-13T10:26:18,398 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:35745:35745)] 2024-11-13T10:26:18,398 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => d55fca8381d823986a147abd80ee2bd3, NAME => 'testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', 
IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41249/hbase 2024-11-13T10:26:18,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741931_1110 (size=67) 2024-11-13T10:26:18,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741931_1110 (size=67) 2024-11-13T10:26:18,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741931_1110 (size=67) 2024-11-13T10:26:18,406 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:18,408 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,409 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d55fca8381d823986a147abd80ee2bd3 columnFamilyName a 2024-11-13T10:26:18,409 DEBUG [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:18,409 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(327): Store=d55fca8381d823986a147abd80ee2bd3/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:18,410 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family b of region d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,411 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d55fca8381d823986a147abd80ee2bd3 columnFamilyName b 2024-11-13T10:26:18,411 DEBUG [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:18,411 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(327): Store=d55fca8381d823986a147abd80ee2bd3/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:18,411 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,412 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d55fca8381d823986a147abd80ee2bd3 columnFamilyName c 2024-11-13T10:26:18,412 DEBUG [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:18,413 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(327): Store=d55fca8381d823986a147abd80ee2bd3/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:18,413 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,413 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,413 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered 
edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,414 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,414 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,415 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-13T10:26:18,416 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,418 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T10:26:18,418 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d55fca8381d823986a147abd80ee2bd3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69021042, jitterRate=0.028493672609329224}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-13T10:26:18,418 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d55fca8381d823986a147abd80ee2bd3: Writing region info on filesystem at 1731493578407Initializing all the Stores at 1731493578407Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493578407Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493578407Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493578407Cleaning up temporary data from old regions at 1731493578414 (+7 ms)Region opened successfully at 1731493578418 (+4 ms) 2024-11-13T10:26:18,418 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing d55fca8381d823986a147abd80ee2bd3, disabling compactions & flushes 2024-11-13T10:26:18,418 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3. 2024-11-13T10:26:18,418 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3. 
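Annotation: the descriptor echoed in the open journal above (families a, b and c, each with VERSIONS => '1', BLOOMFILTER => 'ROW', 64 KB blocks, no compression) can be reproduced with the standard HBase 2.x builder API. The sketch below only mirrors the attributes printed in the log and is not the test's own setup code; anything not shown in the log is left at its default.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch: a table descriptor equivalent to the one printed in the open journal.
public final class DescriptorSketch {
  public static TableDescriptor build() {
    TableDescriptorBuilder table =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("testReplayEditsWrittenViaHRegion"));
    for (String family : new String[] { "a", "b", "c" }) {
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)                  // VERSIONS => '1'
          .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
          .setBlocksize(64 * 1024)            // BLOCKSIZE => '65536 B (64KB)'
          .build();
      table.setColumnFamily(cf);              // COMPRESSION, TTL, etc. stay at defaults
    }
    return table.build();
  }
}

The same BLOOMFILTER => 'ROW' family setting is what triggers the earlier StoreFileOpener messages on the bulk-loaded files, whose HFiles carry no Bloom filter of their own ("NONE, but ROW specified in column family configuration").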
2024-11-13T10:26:18,418 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3. after waiting 0 ms 2024-11-13T10:26:18,418 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3. 2024-11-13T10:26:18,419 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3. 2024-11-13T10:26:18,419 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for d55fca8381d823986a147abd80ee2bd3: Waiting for close lock at 1731493578418Disabling compacts and flushes for region at 1731493578418Disabling writes for close at 1731493578418Writing region close event to WAL at 1731493578419 (+1 ms)Closed at 1731493578419 2024-11-13T10:26:18,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741930_1109 (size=95) 2024-11-13T10:26:18,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741930_1109 (size=95) 2024-11-13T10:26:18,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741930_1109 (size=95) 2024-11-13T10:26:18,424 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-13T10:26:18,424 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-35878491:(num 1731493578369) 2024-11-13T10:26:18,424 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-13T10:26:18,425 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:26:18,439 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578426, exclude list is [], retry=0 2024-11-13T10:26:18,442 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:26:18,442 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:26:18,442 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:26:18,444 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578426 2024-11-13T10:26:18,444 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with 
pipeline: [(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:35745:35745)] 2024-11-13T10:26:18,444 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => d55fca8381d823986a147abd80ee2bd3, NAME => 'testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:26:18,444 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:18,444 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,444 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,446 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,447 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d55fca8381d823986a147abd80ee2bd3 columnFamilyName a 2024-11-13T10:26:18,447 DEBUG [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:18,447 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(327): Store=d55fca8381d823986a147abd80ee2bd3/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:18,447 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,448 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d55fca8381d823986a147abd80ee2bd3 columnFamilyName b 2024-11-13T10:26:18,448 DEBUG [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:18,448 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(327): Store=d55fca8381d823986a147abd80ee2bd3/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:18,448 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,449 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d55fca8381d823986a147abd80ee2bd3 columnFamilyName c 2024-11-13T10:26:18,449 DEBUG [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:18,449 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(327): Store=d55fca8381d823986a147abd80ee2bd3/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:18,449 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,450 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,451 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,452 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,452 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,452 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-13T10:26:18,453 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,454 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d55fca8381d823986a147abd80ee2bd3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64560348, jitterRate=-0.037975847721099854}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-13T10:26:18,454 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d55fca8381d823986a147abd80ee2bd3: Writing region info on filesystem at 1731493578445Initializing all the Stores at 1731493578445Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493578445Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493578445Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493578445Cleaning up temporary data from old regions at 1731493578452 (+7 ms)Region opened successfully at 1731493578454 (+2 ms) 2024-11-13T10:26:18,463 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing d55fca8381d823986a147abd80ee2bd3 3/3 column families, dataSize=870 B heapSize=2.31 KB 2024-11-13T10:26:18,488 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/.tmp/a/cd72a206788445798915d9bb0f693cc2 is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1731493578454/Put/seqid=0 2024-11-13T10:26:18,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741933_1112 (size=5958) 2024-11-13T10:26:18,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741933_1112 (size=5958) 2024-11-13T10:26:18,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741933_1112 (size=5958) 2024-11-13T10:26:18,503 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/.tmp/a/cd72a206788445798915d9bb0f693cc2 2024-11-13T10:26:18,552 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/.tmp/a/cd72a206788445798915d9bb0f693cc2 as hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/a/cd72a206788445798915d9bb0f693cc2 2024-11-13T10:26:18,557 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/a/cd72a206788445798915d9bb0f693cc2, entries=10, sequenceid=13, filesize=5.8 K 2024-11-13T10:26:18,559 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for d55fca8381d823986a147abd80ee2bd3 in 95ms, sequenceid=13, compaction requested=false 2024-11-13T10:26:18,559 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for d55fca8381d823986a147abd80ee2bd3: 2024-11-13T10:26:18,576 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing d55fca8381d823986a147abd80ee2bd3, disabling compactions & flushes 2024-11-13T10:26:18,576 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3. 2024-11-13T10:26:18,577 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3. 2024-11-13T10:26:18,577 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3. after waiting 0 ms 2024-11-13T10:26:18,577 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3. 2024-11-13T10:26:18,577 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1740 in region testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3. 2024-11-13T10:26:18,577 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3. 
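The flush above writes the memstore for column family a to a temporary HFile under the region's .tmp directory and then commits it by renaming it into the family directory (the "Committing .tmp/a/... as a/..." lines). Below is a minimal sketch of that write-to-temp-then-rename pattern using only the public Hadoop FileSystem API; the class and helper names are illustrative assumptions, not the actual HBase internals (HRegionFileSystem performs this step in the real code path).

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch only: commit a flushed file from the region's .tmp
// area into its column-family directory with a rename, mirroring the
// "Committing .tmp/a/<hfile> as a/<hfile>" entries in the log above.
// The class and method names are hypothetical, not HBase API.
public final class TmpCommitSketch {
  static Path commitFlushedFile(FileSystem fs, Path regionDir,
      String family, String hfileName) throws IOException {
    Path tmpFile = new Path(new Path(regionDir, ".tmp/" + family), hfileName);
    Path familyDir = new Path(regionDir, family);
    Path dstFile = new Path(familyDir, hfileName);
    if (!fs.exists(familyDir) && !fs.mkdirs(familyDir)) {
      throw new IOException("Could not create " + familyDir);
    }
    // The rename ensures readers only ever see a fully written store file.
    if (!fs.rename(tmpFile, dstFile)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + dstFile);
    }
    return dstFile;
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Region directory layout as seen in the log (values illustrative).
    Path regionDir = new Path(
        "/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3");
    System.out.println(
        commitFlushedFile(fs, regionDir, "a", "cd72a206788445798915d9bb0f693cc2"));
  }
}
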
2024-11-13T10:26:18,578 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for d55fca8381d823986a147abd80ee2bd3: Waiting for close lock at 1731493578576Disabling compacts and flushes for region at 1731493578576Disabling writes for close at 1731493578577 (+1 ms)Writing region close event to WAL at 1731493578577Closed at 1731493578577 2024-11-13T10:26:18,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741932_1111 (size=3346) 2024-11-13T10:26:18,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741932_1111 (size=3346) 2024-11-13T10:26:18,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741932_1111 (size=3346) 2024-11-13T10:26:18,607 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578426, size=3.3 K (3346bytes) 2024-11-13T10:26:18,607 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578426 2024-11-13T10:26:18,608 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578426 after 0ms 2024-11-13T10:26:18,610 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578426: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:26:18,611 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578426 took 4ms 2024-11-13T10:26:18,613 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578426 so closing down 2024-11-13T10:26:18,613 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-13T10:26:18,614 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1731493578426.temp 2024-11-13T10:26:18,616 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000003-wal.1731493578426.temp 2024-11-13T10:26:18,616 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-13T10:26:18,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741934_1113 (size=2944) 2024-11-13T10:26:18,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741934_1113 (size=2944) 2024-11-13T10:26:18,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741934_1113 
(size=2944) 2024-11-13T10:26:18,629 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000003-wal.1731493578426.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-11-13T10:26:18,631 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000003-wal.1731493578426.temp to hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000035 2024-11-13T10:26:18,631 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 20 ms; skipped=2; WAL=hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578426, size=3.3 K, length=3346, corrupted=false, cancelled=false 2024-11-13T10:26:18,631 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578426, journal: Splitting hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578426, size=3.3 K (3346bytes) at 1731493578607Finishing writing output for hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578426 so closing down at 1731493578613 (+6 ms)Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000003-wal.1731493578426.temp at 1731493578616 (+3 ms)3 split writer threads finished at 1731493578616Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000003-wal.1731493578426.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1731493578629 (+13 ms)Rename recovered edits hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000003-wal.1731493578426.temp to hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000035 at 1731493578631 (+2 ms)Processed 32 edits across 1 Regions in 20 ms; skipped=2; WAL=hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578426, size=3.3 K, length=3346, corrupted=false, cancelled=false at 1731493578631 2024-11-13T10:26:18,632 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578426 to hdfs://localhost:41249/hbase/oldWALs/wal.1731493578426 2024-11-13T10:26:18,633 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000035 2024-11-13T10:26:18,633 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-13T10:26:18,635 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, 
rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:26:18,648 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578635, exclude list is [], retry=0 2024-11-13T10:26:18,651 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:26:18,651 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:26:18,652 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:26:18,654 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578635 2024-11-13T10:26:18,654 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:42913:42913),(127.0.0.1/127.0.0.1:45409:45409)] 2024-11-13T10:26:18,654 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => d55fca8381d823986a147abd80ee2bd3, NAME => 'testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3.', STARTKEY => '', ENDKEY => ''} 2024-11-13T10:26:18,654 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:18,655 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,655 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,656 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,657 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for 
minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d55fca8381d823986a147abd80ee2bd3 columnFamilyName a 2024-11-13T10:26:18,657 DEBUG [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:18,663 DEBUG [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/a/cd72a206788445798915d9bb0f693cc2 2024-11-13T10:26:18,663 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(327): Store=d55fca8381d823986a147abd80ee2bd3/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:18,663 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,664 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d55fca8381d823986a147abd80ee2bd3 columnFamilyName b 2024-11-13T10:26:18,664 DEBUG [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:18,664 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(327): Store=d55fca8381d823986a147abd80ee2bd3/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:18,664 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,665 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d55fca8381d823986a147abd80ee2bd3 columnFamilyName c 2024-11-13T10:26:18,665 DEBUG [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:18,666 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(327): Store=d55fca8381d823986a147abd80ee2bd3/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:18,666 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,666 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,668 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,668 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000035 2024-11-13T10:26:18,670 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000035: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:26:18,672 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 10, firstSequenceIdInLog=3, maxSequenceIdInLog=35, path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000035 2024-11-13T10:26:18,672 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing d55fca8381d823986a147abd80ee2bd3 3/3 column families, dataSize=1.70 KB heapSize=3.88 KB 2024-11-13T10:26:18,687 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/.tmp/b/9c6248efa76c40b39581c5cddf05ce84 is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1731493578559/Put/seqid=0 2024-11-13T10:26:18,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741936_1115 (size=5958) 2024-11-13T10:26:18,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741936_1115 (size=5958) 2024-11-13T10:26:18,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741936_1115 (size=5958) 2024-11-13T10:26:18,697 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), 
to=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/.tmp/b/9c6248efa76c40b39581c5cddf05ce84 2024-11-13T10:26:18,718 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/.tmp/c/608c3bd05ac04fd79ceaed52ff185e27 is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1731493578566/Put/seqid=0 2024-11-13T10:26:18,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741937_1116 (size=5958) 2024-11-13T10:26:18,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741937_1116 (size=5958) 2024-11-13T10:26:18,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741937_1116 (size=5958) 2024-11-13T10:26:18,728 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/.tmp/c/608c3bd05ac04fd79ceaed52ff185e27 2024-11-13T10:26:18,735 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/.tmp/b/9c6248efa76c40b39581c5cddf05ce84 as hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/b/9c6248efa76c40b39581c5cddf05ce84 2024-11-13T10:26:18,740 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/b/9c6248efa76c40b39581c5cddf05ce84, entries=10, sequenceid=35, filesize=5.8 K 2024-11-13T10:26:18,741 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/.tmp/c/608c3bd05ac04fd79ceaed52ff185e27 as hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/c/608c3bd05ac04fd79ceaed52ff185e27 2024-11-13T10:26:18,747 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/c/608c3bd05ac04fd79ceaed52ff185e27, entries=10, sequenceid=35, filesize=5.8 K 2024-11-13T10:26:18,747 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for d55fca8381d823986a147abd80ee2bd3 in 75ms, sequenceid=35, compaction requested=false; wal=null 2024-11-13T10:26:18,748 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000035 2024-11-13T10:26:18,749 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,749 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,750 DEBUG [Time-limited test {}] 
regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-13T10:26:18,751 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:18,753 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/35.seqid, newMaxSeqId=35, maxSeqId=1 2024-11-13T10:26:18,754 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d55fca8381d823986a147abd80ee2bd3; next sequenceid=36; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60305894, jitterRate=-0.10137215256690979}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-13T10:26:18,754 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d55fca8381d823986a147abd80ee2bd3: Writing region info on filesystem at 1731493578655Initializing all the Stores at 1731493578656 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493578656Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493578656Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493578656Obtaining lock to block concurrent updates at 1731493578672 (+16 ms)Preparing flush snapshotting stores in d55fca8381d823986a147abd80ee2bd3 at 1731493578672Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3., syncing WAL and waiting on mvcc, flushsize=dataSize=1740, getHeapSize=3920, getOffHeapSize=0, getCellsCount=20 at 1731493578672Flushing stores of testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3. 
at 1731493578672Flushing d55fca8381d823986a147abd80ee2bd3/b: creating writer at 1731493578672Flushing d55fca8381d823986a147abd80ee2bd3/b: appending metadata at 1731493578686 (+14 ms)Flushing d55fca8381d823986a147abd80ee2bd3/b: closing flushed file at 1731493578687 (+1 ms)Flushing d55fca8381d823986a147abd80ee2bd3/c: creating writer at 1731493578702 (+15 ms)Flushing d55fca8381d823986a147abd80ee2bd3/c: appending metadata at 1731493578717 (+15 ms)Flushing d55fca8381d823986a147abd80ee2bd3/c: closing flushed file at 1731493578717Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d52dd07: reopening flushed file at 1731493578734 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2ead7200: reopening flushed file at 1731493578740 (+6 ms)Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for d55fca8381d823986a147abd80ee2bd3 in 75ms, sequenceid=35, compaction requested=false; wal=null at 1731493578747 (+7 ms)Cleaning up temporary data from old regions at 1731493578749 (+2 ms)Region opened successfully at 1731493578754 (+5 ms) 2024-11-13T10:26:18,826 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578635, size=0 (0bytes) 2024-11-13T10:26:18,826 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578635 might be still open, length is 0 2024-11-13T10:26:18,826 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578635 2024-11-13T10:26:18,826 WARN [IPC Server handler 1 on default port 41249 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578635 has not been closed. Lease recovery is in progress. 
RecoveryId = 1117 for block blk_1073741935_1114 2024-11-13T10:26:18,826 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578635 after 0ms 2024-11-13T10:26:21,062 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError 2024-11-13T10:26:21,063 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError Metrics about Tables on a single HBase RegionServer 2024-11-13T10:26:21,063 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles 2024-11-13T10:26:21,063 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles Metrics about Tables on a single HBase RegionServer 2024-11-13T10:26:21,259 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:52524 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741935_1114] {}] datanode.DataXceiver(331): 127.0.0.1:44787:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52524 dst: /127.0.0.1:44787 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:44787 remote=/127.0.0.1:52524]. Total timeout mills is 60000, 57527 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:26:21,259 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:50538 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741935_1114] {}] datanode.DataXceiver(331): 127.0.0.1:38649:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50538 dst: /127.0.0.1:38649 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:26:21,259 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2118105142_22 at /127.0.0.1:55412 [Receiving block BP-357896810-172.17.0.2-1731493536919:blk_1073741935_1114] {}] datanode.DataXceiver(331): 127.0.0.1:45097:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55412 dst: /127.0.0.1:45097 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
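The preceding entries show WAL lease recovery: the splitter finds wal.1731493578635 still open (reported length 0), asks the NameNode to recover the lease, the first attempt fails while the write pipeline is torn down (the DataXceiver errors above), and a later attempt succeeds once the block is finalized. Below is a minimal sketch of that recover-and-retry loop against the public DistributedFileSystem#recoverLease API; the class name, timeout, and retry interval are illustrative assumptions, and the RecoverLeaseFSUtils helper seen in the log builds on the same underlying call.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative sketch only: poll DistributedFileSystem#recoverLease until the
// NameNode reports the file closed, roughly the loop behind the
// "Recover lease ... attempt=N" entries above. Timeout and retry interval
// are made-up example values.
public final class LeaseRecoverySketch {
  static boolean recoverLease(DistributedFileSystem dfs, Path wal,
      long timeoutMs, long retryIntervalMs) throws IOException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    int attempt = 0;
    while (System.currentTimeMillis() < deadline) {
      // true means the file is now closed and safe to read for splitting
      if (dfs.recoverLease(wal)) {
        System.out.println("Recovered lease, attempt=" + attempt + " on " + wal);
        return true;
      }
      System.out.println("Failed to recover lease, attempt=" + attempt + " on " + wal);
      attempt++;
      try {
        Thread.sleep(retryIntervalMs);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException("Interrupted while waiting for lease recovery", e);
      }
    }
    return false;
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IOException("Lease recovery only applies to HDFS");
    }
    Path wal = new Path(args[0]); // e.g. an abandoned WAL under /hbase/WALs/...
    recoverLease((DistributedFileSystem) fs, wal, 60_000L, 4_000L);
  }
}
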
2024-11-13T10:26:21,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741935_1117 (size=2936) 2024-11-13T10:26:21,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741935_1117 (size=2936) 2024-11-13T10:26:21,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741935_1117 (size=2936) 2024-11-13T10:26:22,827 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578635 after 4001ms 2024-11-13T10:26:22,831 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578635: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:26:22,831 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578635 took 4006ms 2024-11-13T10:26:22,834 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578635; continuing. 2024-11-13T10:26:22,834 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578635 so closing down 2024-11-13T10:26:22,834 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-13T10:26:22,835 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000037-wal.1731493578635.temp 2024-11-13T10:26:22,836 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000037-wal.1731493578635.temp 2024-11-13T10:26:22,837 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-13T10:26:22,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741938_1118 (size=2944) 2024-11-13T10:26:22,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741938_1118 (size=2944) 2024-11-13T10:26:22,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741938_1118 (size=2944) 2024-11-13T10:26:22,844 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000037-wal.1731493578635.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-11-13T10:26:22,846 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits 
hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000037-wal.1731493578635.temp to hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000066 2024-11-13T10:26:22,846 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 30 edits across 1 Regions in 15 ms; skipped=0; WAL=hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578635, size=0, length=0, corrupted=false, cancelled=false 2024-11-13T10:26:22,846 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578635, journal: Splitting hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578635, size=0 (0bytes) at 1731493578826Finishing writing output for hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578635 so closing down at 1731493582834 (+4008 ms)Creating recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000037-wal.1731493578635.temp at 1731493582836 (+2 ms)3 split writer threads finished at 1731493582837 (+1 ms)Closed recovered edits writer path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000037-wal.1731493578635.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1731493582844 (+7 ms)Rename recovered edits hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000037-wal.1731493578635.temp to hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000066 at 1731493582846 (+2 ms)Processed 30 edits across 1 Regions in 15 ms; skipped=0; WAL=hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578635, size=0, length=0, corrupted=false, cancelled=false at 1731493582846 2024-11-13T10:26:22,848 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578635 to hdfs://localhost:41249/hbase/oldWALs/wal.1731493578635 2024-11-13T10:26:22,849 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000066 2024-11-13T10:26:22,849 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-13T10:26:22,851 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:41249/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361, archiveDir=hdfs://localhost:41249/hbase/oldWALs, maxLogs=32 2024-11-13T10:26:22,865 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493582852, exclude list is [], retry=0 2024-11-13T10:26:22,868 DEBUG [TestAsyncWALReplay-pool-0 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44787,DS-1af27950-119b-421a-9580-8a6fa7d1ebb0,DISK] 2024-11-13T10:26:22,868 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38649,DS-4caa984b-780d-4d07-9178-6a891c1e8e45,DISK] 2024-11-13T10:26:22,868 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45097,DS-2ac43560-8e20-498a-852c-1b3a1f0157e9,DISK] 2024-11-13T10:26:22,870 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493582852 2024-11-13T10:26:22,871 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35745:35745),(127.0.0.1/127.0.0.1:45409:45409),(127.0.0.1/127.0.0.1:42913:42913)] 2024-11-13T10:26:22,871 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T10:26:22,873 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:22,874 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d55fca8381d823986a147abd80ee2bd3 columnFamilyName a 2024-11-13T10:26:22,874 DEBUG [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:22,880 DEBUG [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/a/cd72a206788445798915d9bb0f693cc2 2024-11-13T10:26:22,880 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(327): Store=d55fca8381d823986a147abd80ee2bd3/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:22,880 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:22,881 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d55fca8381d823986a147abd80ee2bd3 columnFamilyName b 2024-11-13T10:26:22,881 DEBUG [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:22,886 DEBUG [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/b/9c6248efa76c40b39581c5cddf05ce84 2024-11-13T10:26:22,887 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(327): Store=d55fca8381d823986a147abd80ee2bd3/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:22,887 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:22,888 INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d55fca8381d823986a147abd80ee2bd3 columnFamilyName c 2024-11-13T10:26:22,888 DEBUG [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T10:26:22,893 DEBUG [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/c/608c3bd05ac04fd79ceaed52ff185e27 2024-11-13T10:26:22,893 
INFO [StoreOpener-d55fca8381d823986a147abd80ee2bd3-1 {}] regionserver.HStore(327): Store=d55fca8381d823986a147abd80ee2bd3/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T10:26:22,893 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:22,894 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:22,896 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:22,896 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000066 2024-11-13T10:26:22,898 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000066: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-13T10:26:22,904 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 30, skipped 0, firstSequenceIdInLog=37, maxSequenceIdInLog=66, path=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000066 2024-11-13T10:26:22,904 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing d55fca8381d823986a147abd80ee2bd3 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-11-13T10:26:22,919 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/.tmp/a/300a31329cdd4853b6b66603b66f33f6 is 91, key is testReplayEditsWrittenViaHRegion/a:y0/1731493578762/Put/seqid=0 2024-11-13T10:26:22,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741940_1120 (size=5958) 2024-11-13T10:26:22,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741940_1120 (size=5958) 2024-11-13T10:26:22,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741940_1120 (size=5958) 2024-11-13T10:26:22,928 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/.tmp/a/300a31329cdd4853b6b66603b66f33f6 2024-11-13T10:26:22,958 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/.tmp/b/ab39d63015be4d9494b01fc92e7d677e is 91, key is testReplayEditsWrittenViaHRegion/b:y0/1731493578769/Put/seqid=0 2024-11-13T10:26:22,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38649 is added to blk_1073741941_1121 (size=5958) 2024-11-13T10:26:22,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741941_1121 (size=5958) 2024-11-13T10:26:22,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741941_1121 (size=5958) 2024-11-13T10:26:22,971 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/.tmp/b/ab39d63015be4d9494b01fc92e7d677e 2024-11-13T10:26:23,009 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/.tmp/c/a7e4ace649424e7899cf60840020c46e is 91, key is testReplayEditsWrittenViaHRegion/c:y0/1731493578778/Put/seqid=0 2024-11-13T10:26:23,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741942_1122 (size=5958) 2024-11-13T10:26:23,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741942_1122 (size=5958) 2024-11-13T10:26:23,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741942_1122 (size=5958) 2024-11-13T10:26:23,019 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/.tmp/c/a7e4ace649424e7899cf60840020c46e 2024-11-13T10:26:23,026 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/.tmp/a/300a31329cdd4853b6b66603b66f33f6 as hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/a/300a31329cdd4853b6b66603b66f33f6 2024-11-13T10:26:23,033 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/a/300a31329cdd4853b6b66603b66f33f6, entries=10, sequenceid=66, filesize=5.8 K 2024-11-13T10:26:23,038 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/.tmp/b/ab39d63015be4d9494b01fc92e7d677e as hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/b/ab39d63015be4d9494b01fc92e7d677e 2024-11-13T10:26:23,045 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/b/ab39d63015be4d9494b01fc92e7d677e, entries=10, sequenceid=66, filesize=5.8 K 2024-11-13T10:26:23,046 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/.tmp/c/a7e4ace649424e7899cf60840020c46e as 
hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/c/a7e4ace649424e7899cf60840020c46e 2024-11-13T10:26:23,053 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/c/a7e4ace649424e7899cf60840020c46e, entries=10, sequenceid=66, filesize=5.8 K 2024-11-13T10:26:23,053 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for d55fca8381d823986a147abd80ee2bd3 in 149ms, sequenceid=66, compaction requested=false; wal=null 2024-11-13T10:26:23,054 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/0000000000000000066 2024-11-13T10:26:23,055 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:23,055 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:23,056 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-13T10:26:23,058 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d55fca8381d823986a147abd80ee2bd3 2024-11-13T10:26:23,061 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/hbase/data/default/testReplayEditsWrittenViaHRegion/d55fca8381d823986a147abd80ee2bd3/recovered.edits/66.seqid, newMaxSeqId=66, maxSeqId=35 2024-11-13T10:26:23,062 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d55fca8381d823986a147abd80ee2bd3; next sequenceid=67; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61269387, jitterRate=-0.08701498806476593}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-13T10:26:23,062 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d55fca8381d823986a147abd80ee2bd3: Writing region info on filesystem at 1731493582871Initializing all the Stores at 1731493582872 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493582872Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731493582872Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '65536 B (64KB)'} at 1731493582872Obtaining lock to block concurrent updates at 1731493582904 (+32 ms)Preparing flush snapshotting stores in d55fca8381d823986a147abd80ee2bd3 at 1731493582904Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3., syncing WAL and waiting on mvcc, flushsize=dataSize=2610, getHeapSize=5520, getOffHeapSize=0, getCellsCount=30 at 1731493582904Flushing stores of testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3. at 1731493582904Flushing d55fca8381d823986a147abd80ee2bd3/a: creating writer at 1731493582904Flushing d55fca8381d823986a147abd80ee2bd3/a: appending metadata at 1731493582918 (+14 ms)Flushing d55fca8381d823986a147abd80ee2bd3/a: closing flushed file at 1731493582919 (+1 ms)Flushing d55fca8381d823986a147abd80ee2bd3/b: creating writer at 1731493582933 (+14 ms)Flushing d55fca8381d823986a147abd80ee2bd3/b: appending metadata at 1731493582957 (+24 ms)Flushing d55fca8381d823986a147abd80ee2bd3/b: closing flushed file at 1731493582957Flushing d55fca8381d823986a147abd80ee2bd3/c: creating writer at 1731493582981 (+24 ms)Flushing d55fca8381d823986a147abd80ee2bd3/c: appending metadata at 1731493583008 (+27 ms)Flushing d55fca8381d823986a147abd80ee2bd3/c: closing flushed file at 1731493583008Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f80d5bc: reopening flushed file at 1731493583025 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e8bf9f7: reopening flushed file at 1731493583033 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58533b04: reopening flushed file at 1731493583045 (+12 ms)Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for d55fca8381d823986a147abd80ee2bd3 in 149ms, sequenceid=66, compaction requested=false; wal=null at 1731493583053 (+8 ms)Cleaning up temporary data from old regions at 1731493583055 (+2 ms)Region opened successfully at 1731493583062 (+7 ms) 2024-11-13T10:26:23,080 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing d55fca8381d823986a147abd80ee2bd3, disabling compactions & flushes 2024-11-13T10:26:23,080 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3. 2024-11-13T10:26:23,080 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3. 2024-11-13T10:26:23,080 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3. after waiting 0 ms 2024-11-13T10:26:23,080 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3. 2024-11-13T10:26:23,082 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1731493578363.d55fca8381d823986a147abd80ee2bd3. 
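[Editor's note] The entries above show region d55fca8381d823986a147abd80ee2bd3 replaying 30 edits from recovered.edits/0000000000000000066 (written with GZ value compression) and flushing column families a, b and c before the region opens at sequenceid 67. As a minimal, purely illustrative sketch of the client-side pattern such a replay test exercises — write one cell per family, let the edits land in the WAL, then verify the cells survive a region reopen — using only the standard HBase client API. The table, family, row and qualifier names mirror the log; the class itself is hypothetical and is not the AbstractTestWALReplay code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReplayEditsSketch {
      // Names taken from the log above; the flow is illustrative only.
      static final TableName TABLE = TableName.valueOf("testReplayEditsWrittenViaHRegion");
      static final byte[][] FAMILIES = {Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")};

      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TABLE)) {
          byte[] row = Bytes.toBytes("testReplayEditsWrittenViaHRegion");
          // Write one cell per family; in the real test these edits go to the WAL
          // and are later replayed from recovered.edits when the region reopens.
          for (byte[] family : FAMILIES) {
            Put put = new Put(row);
            put.addColumn(family, Bytes.toBytes("y0"), Bytes.toBytes("value"));
            table.put(put);
          }
          // After reopen and replay (the flush at sequenceid=66 above),
          // every cell must still be readable.
          for (byte[] family : FAMILIES) {
            Result r = table.get(new Get(row).addColumn(family, Bytes.toBytes("y0")));
            if (r.getValue(family, Bytes.toBytes("y0")) == null) {
              throw new IllegalStateException("edit lost during WAL replay");
            }
          }
        }
      }
    }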
2024-11-13T10:26:23,082 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for d55fca8381d823986a147abd80ee2bd3: Waiting for close lock at 1731493583080Disabling compacts and flushes for region at 1731493583080Disabling writes for close at 1731493583080Writing region close event to WAL at 1731493583082 (+2 ms)Closed at 1731493583082 2024-11-13T10:26:23,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741939_1119 (size=95) 2024-11-13T10:26:23,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741939_1119 (size=95) 2024-11-13T10:26:23,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741939_1119 (size=95) 2024-11-13T10:26:23,096 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-13T10:26:23,096 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1731493582852) 2024-11-13T10:26:23,120 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenViaHRegion Thread=438 (was 432) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1505282874_22 at /127.0.0.1:42500 [Waiting for operation #14] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1505282874_22 at /127.0.0.1:34592 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1396088596) connection to localhost/127.0.0.1:41249 from jenkinstestReplayEditsWrittenViaHRegion java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkinstestReplayEditsWrittenViaHRegion@localhost:41249 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1505282874_22 at /127.0.0.1:59646 [Waiting for operation #16] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1396 (was 1330) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=566 (was 589), ProcessCount=11 (was 11), AvailableMemoryMB=270 (was 269) - AvailableMemoryMB LEAK? - 2024-11-13T10:26:23,121 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1396 is superior to 1024 2024-11-13T10:26:23,121 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T10:26:23,121 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
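[Editor's note] The ResourceChecker summary above compares thread and file-descriptor counts before and after the test (Thread=438 vs 432, OpenFileDescriptor=1396 vs 1330) and then warns that 1396 exceeds the 1024 threshold. As a rough illustration of that kind of before/after accounting — not the ResourceChecker implementation itself — a sketch using standard JMX beans; the UnixOperatingSystemMXBean cast is only available on Unix-like JVMs.

    import java.lang.management.ManagementFactory;
    import java.lang.management.OperatingSystemMXBean;
    import com.sun.management.UnixOperatingSystemMXBean;

    public class ResourceSnapshotSketch {
      // Coarse counters comparable to what the log's summary line reports.
      static long openFileDescriptors() {
        OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
        if (os instanceof UnixOperatingSystemMXBean) {
          return ((UnixOperatingSystemMXBean) os).getOpenFileDescriptorCount();
        }
        return -1; // not available on this platform
      }

      static int liveThreads() {
        return ManagementFactory.getThreadMXBean().getThreadCount();
      }

      public static void main(String[] args) {
        long fdBefore = openFileDescriptors();
        int threadsBefore = liveThreads();
        // ... run the test body here ...
        long fdAfter = openFileDescriptors();
        int threadsAfter = liveThreads();
        if (fdAfter > fdBefore || threadsAfter > threadsBefore) {
          System.out.printf("Potential leak: Thread=%d (was %d), OpenFileDescriptor=%d (was %d)%n",
              threadsAfter, threadsBefore, fdAfter, fdBefore);
        }
      }
    }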
2024-11-13T10:26:23,121 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T10:26:23,121 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T10:26:23,122 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T10:26:23,122 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
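[Editor's note] The call stack above shows the teardown path: a JUnit @AfterClass hook drives HBaseTestingUtil.shutdownMiniCluster(), which closes the shared async connection before the master and region servers are stopped in the entries that follow. A minimal, hedged sketch of that test lifecycle; shutdownMiniCluster() is confirmed by the stack trace, while the no-arg constructor and startMiniCluster(3) are assumptions, and the class is not a copy of AbstractTestWALReplay.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniClusterLifecycleSketch {
      // Shared test utility, analogous to what the stack trace above goes through.
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUpBeforeClass() throws Exception {
        // Assumed: start an in-process HDFS + HBase cluster with three region
        // servers, matching the three datanodes/region servers visible in the log.
        TEST_UTIL.startMiniCluster(3);
      }

      @AfterClass
      public static void tearDownAfterClass() throws Exception {
        // Mirrors the tearDownAfterClass frame in the stack trace: closes the
        // cluster connection, then shuts down masters and region servers.
        TEST_UTIL.shutdownMiniCluster();
      }
    }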
2024-11-13T10:26:23,122 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T10:26:23,122 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1167627535, stopped=false 2024-11-13T10:26:23,123 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=770665a7984d,45401,1731493540547 2024-11-13T10:26:23,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T10:26:23,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T10:26:23,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T10:26:23,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:26:23,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:26:23,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:26:23,126 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T10:26:23,126 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-13T10:26:23,126 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T10:26:23,126 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T10:26:23,126 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T10:26:23,127 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '770665a7984d,36821,1731493541562' ***** 2024-11-13T10:26:23,127 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T10:26:23,127 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T10:26:23,127 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Set watcher on znode that 
does not yet exist, /hbase/running 2024-11-13T10:26:23,127 INFO [RS:1;770665a7984d:36821 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T10:26:23,127 INFO [RS:1;770665a7984d:36821 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T10:26:23,127 INFO [RS:1;770665a7984d:36821 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T10:26:23,127 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T10:26:23,128 INFO [RS:1;770665a7984d:36821 {}] regionserver.HRegionServer(959): stopping server 770665a7984d,36821,1731493541562 2024-11-13T10:26:23,128 INFO [RS:1;770665a7984d:36821 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T10:26:23,128 INFO [RS:1;770665a7984d:36821 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;770665a7984d:36821. 2024-11-13T10:26:23,128 DEBUG [RS:1;770665a7984d:36821 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T10:26:23,128 DEBUG [RS:1;770665a7984d:36821 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T10:26:23,128 INFO [RS:1;770665a7984d:36821 {}] regionserver.HRegionServer(976): stopping server 770665a7984d,36821,1731493541562; all regions closed. 2024-11-13T10:26:23,128 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '770665a7984d,46143,1731493541627' ***** 2024-11-13T10:26:23,128 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T10:26:23,128 INFO [RS:2;770665a7984d:46143 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T10:26:23,129 INFO [RS:2;770665a7984d:46143 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T10:26:23,129 INFO [RS:2;770665a7984d:46143 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-13T10:26:23,129 INFO [RS:2;770665a7984d:46143 {}] regionserver.HRegionServer(3091): Received CLOSE for 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:23,129 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T10:26:23,130 INFO [RS:2;770665a7984d:46143 {}] regionserver.HRegionServer(959): stopping server 770665a7984d,46143,1731493541627 2024-11-13T10:26:23,130 INFO [RS:2;770665a7984d:46143 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T10:26:23,130 INFO [RS:2;770665a7984d:46143 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;770665a7984d:46143. 2024-11-13T10:26:23,130 DEBUG [RS:2;770665a7984d:46143 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T10:26:23,130 DEBUG [RS:2;770665a7984d:46143 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T10:26:23,130 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 70a598aa9b18017afa50633b8eb231df, disabling compactions & flushes 2024-11-13T10:26:23,130 INFO [RS:2;770665a7984d:46143 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T10:26:23,130 INFO [RS:2;770665a7984d:46143 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T10:26:23,130 INFO [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:23,130 INFO [RS:2;770665a7984d:46143 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T10:26:23,130 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 
2024-11-13T10:26:23,130 INFO [RS:2;770665a7984d:46143 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T10:26:23,130 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. after waiting 0 ms 2024-11-13T10:26:23,130 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:23,131 INFO [RS:2;770665a7984d:46143 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-13T10:26:23,131 DEBUG [RS:2;770665a7984d:46143 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 70a598aa9b18017afa50633b8eb231df=testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df.} 2024-11-13T10:26:23,131 DEBUG [RS:2;770665a7984d:46143 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 70a598aa9b18017afa50633b8eb231df 2024-11-13T10:26:23,131 DEBUG [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T10:26:23,131 INFO [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T10:26:23,131 DEBUG [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T10:26:23,131 DEBUG [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T10:26:23,131 DEBUG [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T10:26:23,132 INFO [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.19 KB heapSize=2.79 KB 2024-11-13T10:26:23,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741834_1010 (size=1708) 2024-11-13T10:26:23,140 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/default/testReplayEditsAfterRegionMovedWithMultiCF/70a598aa9b18017afa50633b8eb231df/recovered.edits/20.seqid, newMaxSeqId=20, maxSeqId=17 2024-11-13T10:26:23,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741834_1010 (size=1708) 2024-11-13T10:26:23,141 INFO [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 
2024-11-13T10:26:23,141 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 70a598aa9b18017afa50633b8eb231df: Waiting for close lock at 1731493583130Running coprocessor pre-close hooks at 1731493583130Disabling compacts and flushes for region at 1731493583130Disabling writes for close at 1731493583130Writing region close event to WAL at 1731493583132 (+2 ms)Running coprocessor post-close hooks at 1731493583141 (+9 ms)Closed at 1731493583141 2024-11-13T10:26:23,141 DEBUG [RS_CLOSE_REGION-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df. 2024-11-13T10:26:23,143 DEBUG [RS:1;770665a7984d:36821 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/oldWALs 2024-11-13T10:26:23,143 INFO [RS:1;770665a7984d:36821 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 770665a7984d%2C36821%2C1731493541562:(num 1731493543503) 2024-11-13T10:26:23,143 DEBUG [RS:1;770665a7984d:36821 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T10:26:23,143 INFO [RS:1;770665a7984d:36821 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T10:26:23,143 INFO [RS:1;770665a7984d:36821 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T10:26:23,144 INFO [RS:1;770665a7984d:36821 {}] hbase.ChoreService(370): Chore service for: regionserver/770665a7984d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T10:26:23,144 DEBUG [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/.tmp/info/5f80ff917d424f178778c317f4128b21 is 205, key is testReplayEditsAfterRegionMovedWithMultiCF,,1731493558828.70a598aa9b18017afa50633b8eb231df./info:regioninfo/1731493562808/Put/seqid=0 2024-11-13T10:26:23,144 INFO [RS:1;770665a7984d:36821 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T10:26:23,144 INFO [RS:1;770665a7984d:36821 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T10:26:23,144 INFO [RS:1;770665a7984d:36821 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T10:26:23,144 INFO [regionserver/770665a7984d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T10:26:23,144 INFO [RS:1;770665a7984d:36821 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T10:26:23,144 INFO [RS:1;770665a7984d:36821 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36821 2024-11-13T10:26:23,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741834_1010 (size=1708) 2024-11-13T10:26:23,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/770665a7984d,36821,1731493541562 2024-11-13T10:26:23,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T10:26:23,148 INFO [RS:1;770665a7984d:36821 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T10:26:23,150 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [770665a7984d,36821,1731493541562] 2024-11-13T10:26:23,151 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/770665a7984d,36821,1731493541562 already deleted, retry=false 2024-11-13T10:26:23,151 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 770665a7984d,36821,1731493541562 expired; onlineServers=1 2024-11-13T10:26:23,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741943_1123 (size=6778) 2024-11-13T10:26:23,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741943_1123 (size=6778) 2024-11-13T10:26:23,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741943_1123 (size=6778) 2024-11-13T10:26:23,156 INFO [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.19 KB at sequenceid=23 (bloomFilter=true), to=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/.tmp/info/5f80ff917d424f178778c317f4128b21 2024-11-13T10:26:23,164 DEBUG [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/.tmp/info/5f80ff917d424f178778c317f4128b21 as hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/info/5f80ff917d424f178778c317f4128b21 2024-11-13T10:26:23,171 INFO [regionserver/770665a7984d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T10:26:23,171 INFO [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/info/5f80ff917d424f178778c317f4128b21, entries=8, sequenceid=23, filesize=6.6 K 2024-11-13T10:26:23,172 INFO [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.19 
KB/1218, heapSize ~2.02 KB/2072, currentSize=0 B/0 for 1588230740 in 41ms, sequenceid=23, compaction requested=false 2024-11-13T10:26:23,178 DEBUG [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/data/hbase/meta/1588230740/recovered.edits/26.seqid, newMaxSeqId=26, maxSeqId=18 2024-11-13T10:26:23,179 DEBUG [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T10:26:23,179 INFO [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T10:26:23,179 DEBUG [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731493583131Running coprocessor pre-close hooks at 1731493583131Disabling compacts and flushes for region at 1731493583131Disabling writes for close at 1731493583131Obtaining lock to block concurrent updates at 1731493583132 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731493583132Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1218, getHeapSize=2792, getOffHeapSize=0, getCellsCount=8 at 1731493583133 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731493583135 (+2 ms)Flushing 1588230740/info: creating writer at 1731493583135Flushing 1588230740/info: appending metadata at 1731493583143 (+8 ms)Flushing 1588230740/info: closing flushed file at 1731493583143Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@70fdad31: reopening flushed file at 1731493583163 (+20 ms)Finished flush of dataSize ~1.19 KB/1218, heapSize ~2.02 KB/2072, currentSize=0 B/0 for 1588230740 in 41ms, sequenceid=23, compaction requested=false at 1731493583173 (+10 ms)Writing region close event to WAL at 1731493583174 (+1 ms)Running coprocessor post-close hooks at 1731493583179 (+5 ms)Closed at 1731493583179 2024-11-13T10:26:23,179 DEBUG [RS_CLOSE_META-regionserver/770665a7984d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T10:26:23,190 INFO [regionserver/770665a7984d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T10:26:23,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T10:26:23,250 INFO [RS:1;770665a7984d:36821 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T10:26:23,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36821-0x10110dc99880002, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T10:26:23,250 INFO [RS:1;770665a7984d:36821 {}] regionserver.HRegionServer(1031): Exiting; stopping=770665a7984d,36821,1731493541562; zookeeper connection closed. 
2024-11-13T10:26:23,250 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@56cf31bc {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@56cf31bc 2024-11-13T10:26:23,311 INFO [regionserver/770665a7984d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T10:26:23,311 INFO [regionserver/770665a7984d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T10:26:23,331 INFO [RS:2;770665a7984d:46143 {}] regionserver.HRegionServer(976): stopping server 770665a7984d,46143,1731493541627; all regions closed. 2024-11-13T10:26:23,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741893_1071 (size=1673) 2024-11-13T10:26:23,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741893_1071 (size=1673) 2024-11-13T10:26:23,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741893_1071 (size=1673) 2024-11-13T10:26:23,337 DEBUG [RS:2;770665a7984d:46143 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/oldWALs 2024-11-13T10:26:23,337 INFO [RS:2;770665a7984d:46143 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 770665a7984d%2C46143%2C1731493541627.meta:.meta(num 1731493561931) 2024-11-13T10:26:23,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741835_1011 (size=723) 2024-11-13T10:26:23,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741835_1011 (size=723) 2024-11-13T10:26:23,341 DEBUG [RS:2;770665a7984d:46143 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/oldWALs 2024-11-13T10:26:23,341 INFO [RS:2;770665a7984d:46143 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 770665a7984d%2C46143%2C1731493541627:(num 1731493543503) 2024-11-13T10:26:23,341 DEBUG [RS:2;770665a7984d:46143 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T10:26:23,341 INFO [RS:2;770665a7984d:46143 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T10:26:23,341 INFO [RS:2;770665a7984d:46143 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T10:26:23,342 INFO [RS:2;770665a7984d:46143 {}] hbase.ChoreService(370): Chore service for: regionserver/770665a7984d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T10:26:23,342 INFO [RS:2;770665a7984d:46143 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T10:26:23,342 INFO [regionserver/770665a7984d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T10:26:23,342 INFO [RS:2;770665a7984d:46143 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46143 2024-11-13T10:26:23,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T10:26:23,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/770665a7984d,46143,1731493541627 2024-11-13T10:26:23,344 INFO [RS:2;770665a7984d:46143 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T10:26:23,345 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [770665a7984d,46143,1731493541627] 2024-11-13T10:26:23,348 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/770665a7984d,46143,1731493541627 already deleted, retry=false 2024-11-13T10:26:23,348 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 770665a7984d,46143,1731493541627 expired; onlineServers=0 2024-11-13T10:26:23,348 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '770665a7984d,45401,1731493540547' ***** 2024-11-13T10:26:23,348 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T10:26:23,348 INFO [M:0;770665a7984d:45401 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T10:26:23,348 INFO [M:0;770665a7984d:45401 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T10:26:23,349 DEBUG [M:0;770665a7984d:45401 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T10:26:23,349 DEBUG [M:0;770665a7984d:45401 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T10:26:23,349 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-13T10:26:23,349 DEBUG [master/770665a7984d:0:becomeActiveMaster-HFileCleaner.small.0-1731493543149 {}] cleaner.HFileCleaner(306): Exit Thread[master/770665a7984d:0:becomeActiveMaster-HFileCleaner.small.0-1731493543149,5,FailOnTimeoutGroup] 2024-11-13T10:26:23,349 DEBUG [master/770665a7984d:0:becomeActiveMaster-HFileCleaner.large.0-1731493543143 {}] cleaner.HFileCleaner(306): Exit Thread[master/770665a7984d:0:becomeActiveMaster-HFileCleaner.large.0-1731493543143,5,FailOnTimeoutGroup] 2024-11-13T10:26:23,349 INFO [M:0;770665a7984d:45401 {}] hbase.ChoreService(370): Chore service for: master/770665a7984d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T10:26:23,349 INFO [M:0;770665a7984d:45401 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T10:26:23,349 DEBUG [M:0;770665a7984d:45401 {}] master.HMaster(1795): Stopping service threads 2024-11-13T10:26:23,349 INFO [M:0;770665a7984d:45401 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T10:26:23,350 INFO [M:0;770665a7984d:45401 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T10:26:23,350 INFO [M:0;770665a7984d:45401 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T10:26:23,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T10:26:23,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T10:26:23,351 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-13T10:26:23,351 DEBUG [M:0;770665a7984d:45401 {}] zookeeper.ZKUtil(347): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T10:26:23,352 WARN [M:0;770665a7984d:45401 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T10:26:23,353 INFO [M:0;770665a7984d:45401 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/.lastflushedseqids 2024-11-13T10:26:23,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741944_1124 (size=119) 2024-11-13T10:26:23,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741944_1124 (size=119) 2024-11-13T10:26:23,372 INFO [M:0;770665a7984d:45401 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T10:26:23,372 INFO [M:0;770665a7984d:45401 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T10:26:23,373 DEBUG [M:0;770665a7984d:45401 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T10:26:23,373 INFO [M:0;770665a7984d:45401 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T10:26:23,373 DEBUG [M:0;770665a7984d:45401 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T10:26:23,373 DEBUG [M:0;770665a7984d:45401 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T10:26:23,373 DEBUG [M:0;770665a7984d:45401 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T10:26:23,373 INFO [M:0;770665a7984d:45401 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=83.38 KB heapSize=102.70 KB 2024-11-13T10:26:23,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741944_1124 (size=119) 2024-11-13T10:26:23,394 DEBUG [M:0;770665a7984d:45401 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e9196fb223704cb4a4f016699050735b is 82, key is hbase:meta,,1/info:regioninfo/1731493562116/Put/seqid=0 2024-11-13T10:26:23,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741945_1125 (size=6063) 2024-11-13T10:26:23,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741945_1125 (size=6063) 2024-11-13T10:26:23,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741945_1125 (size=6063) 2024-11-13T10:26:23,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T10:26:23,451 INFO [RS:2;770665a7984d:46143 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T10:26:23,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46143-0x10110dc99880003, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T10:26:23,451 INFO [RS:2;770665a7984d:46143 {}] regionserver.HRegionServer(1031): Exiting; stopping=770665a7984d,46143,1731493541627; zookeeper connection closed. 
2024-11-13T10:26:23,451 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@194ec26 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@194ec26 2024-11-13T10:26:23,452 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-13T10:26:23,806 INFO [M:0;770665a7984d:45401 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1008 B at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e9196fb223704cb4a4f016699050735b 2024-11-13T10:26:23,838 DEBUG [M:0;770665a7984d:45401 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/95b06c1af99d44ebbe09871263af82e4 is 1075, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731493559263/Put/seqid=0 2024-11-13T10:26:23,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741946_1126 (size=7906) 2024-11-13T10:26:23,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741946_1126 (size=7906) 2024-11-13T10:26:23,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741946_1126 (size=7906) 2024-11-13T10:26:23,847 INFO [M:0;770665a7984d:45401 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=82.16 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/95b06c1af99d44ebbe09871263af82e4 2024-11-13T10:26:23,853 INFO [M:0;770665a7984d:45401 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 95b06c1af99d44ebbe09871263af82e4 2024-11-13T10:26:23,884 DEBUG [M:0;770665a7984d:45401 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/644d68c54f1d414a917a004da6e15339 is 69, key is 770665a7984d,36821,1731493541562/rs:state/1731493543187/Put/seqid=0 2024-11-13T10:26:23,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741947_1127 (size=5445) 2024-11-13T10:26:23,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45097 is added to blk_1073741947_1127 (size=5445) 2024-11-13T10:26:23,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741947_1127 (size=5445) 2024-11-13T10:26:23,897 INFO [M:0;770665a7984d:45401 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=249 B at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/644d68c54f1d414a917a004da6e15339 2024-11-13T10:26:23,903 INFO [M:0;770665a7984d:45401 {}] regionserver.StoreFileReader(518): Loaded Delete Family 
Bloom (CompoundBloomFilter) metadata for 644d68c54f1d414a917a004da6e15339 2024-11-13T10:26:23,904 DEBUG [M:0;770665a7984d:45401 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e9196fb223704cb4a4f016699050735b as hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e9196fb223704cb4a4f016699050735b 2024-11-13T10:26:23,913 INFO [M:0;770665a7984d:45401 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e9196fb223704cb4a4f016699050735b, entries=14, sequenceid=207, filesize=5.9 K 2024-11-13T10:26:23,914 DEBUG [M:0;770665a7984d:45401 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/95b06c1af99d44ebbe09871263af82e4 as hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/95b06c1af99d44ebbe09871263af82e4 2024-11-13T10:26:23,921 INFO [M:0;770665a7984d:45401 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 95b06c1af99d44ebbe09871263af82e4 2024-11-13T10:26:23,921 INFO [M:0;770665a7984d:45401 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/95b06c1af99d44ebbe09871263af82e4, entries=21, sequenceid=207, filesize=7.7 K 2024-11-13T10:26:23,940 DEBUG [M:0;770665a7984d:45401 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/644d68c54f1d414a917a004da6e15339 as hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/644d68c54f1d414a917a004da6e15339 2024-11-13T10:26:23,948 INFO [M:0;770665a7984d:45401 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 644d68c54f1d414a917a004da6e15339 2024-11-13T10:26:23,948 INFO [M:0;770665a7984d:45401 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41249/user/jenkins/test-data/66fd6cbe-45e3-7dad-ed70-3d84357c7510/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/644d68c54f1d414a917a004da6e15339, entries=3, sequenceid=207, filesize=5.3 K 2024-11-13T10:26:23,949 INFO [M:0;770665a7984d:45401 {}] regionserver.HRegion(3140): Finished flush of dataSize ~83.38 KB/85386, heapSize ~102.40 KB/104856, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 576ms, sequenceid=207, compaction requested=false 2024-11-13T10:26:23,954 INFO [M:0;770665a7984d:45401 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T10:26:23,954 DEBUG [M:0;770665a7984d:45401 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731493583372Disabling compacts and flushes for region at 1731493583372Disabling writes for close at 1731493583373 (+1 ms)Obtaining lock to block concurrent updates at 1731493583373Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731493583373Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=85386, getHeapSize=105096, getOffHeapSize=0, getCellsCount=248 at 1731493583373Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731493583374 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731493583374Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731493583393 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731493583394 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731493583813 (+419 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731493583837 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731493583837Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731493583853 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731493583883 (+30 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731493583883Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58e18a3e: reopening flushed file at 1731493583903 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@180025c9: reopening flushed file at 1731493583913 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c320f4c: reopening flushed file at 1731493583921 (+8 ms)Finished flush of dataSize ~83.38 KB/85386, heapSize ~102.40 KB/104856, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 576ms, sequenceid=207, compaction requested=false at 1731493583949 (+28 ms)Writing region close event to WAL at 1731493583954 (+5 ms)Closed at 1731493583954 2024-11-13T10:26:23,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44787 is added to blk_1073741830_1006 (size=69606) 2024-11-13T10:26:23,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38649 is added to blk_1073741830_1006 (size=69606) 2024-11-13T10:26:23,959 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T10:26:23,959 INFO [M:0;770665a7984d:45401 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-13T10:26:23,960 INFO [M:0;770665a7984d:45401 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45401 2024-11-13T10:26:23,961 INFO [M:0;770665a7984d:45401 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T10:26:24,063 INFO [M:0;770665a7984d:45401 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T10:26:24,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T10:26:24,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45401-0x10110dc99880000, quorum=127.0.0.1:51925, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T10:26:24,071 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487/wal.1731493572887 with renewLeaseKey: DEFAULT_16688 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487/wal.1731493572887 (inode 16688) Holder DFSClient_NONMAPREDUCE_-2118105142_22 does not have any open files. at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731493563487/wal.1731493572887 (inode 16688) Holder DFSClient_NONMAPREDUCE_-2118105142_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-11-13T10:26:24,074 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035/wal.1731493563340 with renewLeaseKey: DEFAULT_16665 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035/wal.1731493563340 (inode 16665) Holder DFSClient_NONMAPREDUCE_-2118105142_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731493563035/wal.1731493563340 (inode 16665) Holder DFSClient_NONMAPREDUCE_-2118105142_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-11-13T10:26:24,076 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731493553908/wal.1731493554442 with renewLeaseKey: DEFAULT_16586 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:26:24,078 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136/wal.1731493573750 with renewLeaseKey: DEFAULT_16714 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136/wal.1731493573750 (inode 16714) Holder DFSClient_NONMAPREDUCE_-2118105142_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testdatalosswheninputerror-manual,16010,1731493573136/wal.1731493573750 (inode 16714) Holder DFSClient_NONMAPREDUCE_-2118105142_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-11-13T10:26:24,079 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731493573919/wal.1731493573988 with renewLeaseKey: DEFAULT_16736 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:26:24,079 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731493578361/wal.1731493578635 with renewLeaseKey: DEFAULT_16777 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T10:26:24,080 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731493546010/wal.1731493546120 with renewLeaseKey: DEFAULT_16506 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
	at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-13T10:26:24,082 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal.1731493545765 with renewLeaseKey: DEFAULT_16485
java.io.FileNotFoundException: File does not exist: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal.1731493545765 (inode 16485) Holder DFSClient_NONMAPREDUCE_-2118105142_22 does not have any open files.
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
	at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
	at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
	at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
	at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
	at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1731493545169/wal.1731493545765 (inode 16485) Holder DFSClient_NONMAPREDUCE_-2118105142_22 does not have any open files.
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
	at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
	at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?]
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
	at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	... 25 more
2024-11-13T10:26:24,084 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal.1731493544841 with renewLeaseKey: DEFAULT_16462
java.io.FileNotFoundException: File does not exist: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal.1731493544841 (inode 16462) Holder DFSClient_NONMAPREDUCE_-2118105142_22 does not have any open files.
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
	at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
	at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
	at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
	at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
	at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1731493544484/wal.1731493544841 (inode 16462) Holder DFSClient_NONMAPREDUCE_-2118105142_22 does not have any open files.
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
	at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
	at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?]
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
	at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	... 25 more
2024-11-13T10:26:24,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43206bef{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-13T10:26:24,097 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@228ffa29{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-13T10:26:24,097 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-13T10:26:24,097 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f079a76{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-13T10:26:24,097 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7da22a2e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/hadoop.log.dir/,STOPPED}
2024-11-13T10:26:24,102 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-13T10:26:24,102 WARN [BP-357896810-172.17.0.2-1731493536919 heartbeating to localhost/127.0.0.1:41249 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-13T10:26:24,102 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-13T10:26:24,102 WARN [BP-357896810-172.17.0.2-1731493536919 heartbeating to localhost/127.0.0.1:41249 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-357896810-172.17.0.2-1731493536919 (Datanode Uuid 65e57e12-06ba-4c67-8670-361c7b675aed) service to localhost/127.0.0.1:41249
2024-11-13T10:26:24,103 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/data/data5/current/BP-357896810-172.17.0.2-1731493536919 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-13T10:26:24,104 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/data/data6/current/BP-357896810-172.17.0.2-1731493536919 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-13T10:26:24,104 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-13T10:26:24,114 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4546bb60{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-13T10:26:24,114 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7c7d32f8{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-13T10:26:24,114 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-13T10:26:24,115 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ebbf344{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-13T10:26:24,115 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b5fc47c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/hadoop.log.dir/,STOPPED}
2024-11-13T10:26:24,116 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-13T10:26:24,116 WARN [BP-357896810-172.17.0.2-1731493536919 heartbeating to localhost/127.0.0.1:41249 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-13T10:26:24,116 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-13T10:26:24,116 WARN [BP-357896810-172.17.0.2-1731493536919 heartbeating to localhost/127.0.0.1:41249 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-357896810-172.17.0.2-1731493536919 (Datanode Uuid 74f818aa-bde0-444c-be58-669bd3a13313) service to localhost/127.0.0.1:41249
2024-11-13T10:26:24,117 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/data/data3/current/BP-357896810-172.17.0.2-1731493536919 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-13T10:26:24,117 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/data/data4/current/BP-357896810-172.17.0.2-1731493536919 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-13T10:26:24,117 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-13T10:26:24,131 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@36632d60{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-13T10:26:24,132 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@751d2fa4{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-13T10:26:24,132 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-13T10:26:24,132 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@433df981{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-13T10:26:24,132 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f76f489{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/hadoop.log.dir/,STOPPED}
2024-11-13T10:26:24,134 WARN [BP-357896810-172.17.0.2-1731493536919 heartbeating to localhost/127.0.0.1:41249 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-13T10:26:24,134 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-13T10:26:24,134 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-13T10:26:24,134 WARN [BP-357896810-172.17.0.2-1731493536919 heartbeating to localhost/127.0.0.1:41249 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-357896810-172.17.0.2-1731493536919 (Datanode Uuid 2aff3888-7015-4ad7-9cba-7d6b1e8abc2a) service to localhost/127.0.0.1:41249
2024-11-13T10:26:24,135 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/data/data1/current/BP-357896810-172.17.0.2-1731493536919 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-13T10:26:24,135 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/cluster_048ca973-e213-4f85-e39d-c51a07fc85b5/data/data2/current/BP-357896810-172.17.0.2-1731493536919 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-13T10:26:24,136 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-13T10:26:24,146 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3717288f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-13T10:26:24,147 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4bd70930{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-13T10:26:24,147 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-13T10:26:24,148 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6dc9d5c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-13T10:26:24,148 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f37ffca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb8cc3c3-587d-0ee8-efcd-42faad1f0e12/hadoop.log.dir/,STOPPED}
2024-11-13T10:26:24,162 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-13T10:26:24,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down