2024-11-08 02:28:40,153 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-11-08 02:28:40,166 main DEBUG Took 0.010792 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-08 02:28:40,167 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-08 02:28:40,167 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-08 02:28:40,168 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-08 02:28:40,169 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-08 02:28:40,185 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-08 02:28:40,196 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-08 02:28:40,197 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-08 02:28:40,198 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-08 02:28:40,198 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-08 02:28:40,199 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-08 02:28:40,199 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-08 02:28:40,200 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-08 02:28:40,201 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-08 02:28:40,201 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-08 02:28:40,201 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-08 02:28:40,202 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-08 02:28:40,203 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-08 02:28:40,203 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-08 02:28:40,203 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-08 02:28:40,204 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-08 02:28:40,204 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-08 02:28:40,205 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-08 02:28:40,205 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-08 02:28:40,206 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-08 02:28:40,206 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-08 02:28:40,206 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-08 02:28:40,207 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-08 02:28:40,207 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-08 02:28:40,207 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-08 02:28:40,208 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-08 02:28:40,208 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-08 02:28:40,209 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-08 02:28:40,211 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-08 02:28:40,212 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-08 02:28:40,213 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-08 02:28:40,214 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-08 02:28:40,214 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-08 02:28:40,223 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-08 02:28:40,226 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-08 02:28:40,228 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-08 02:28:40,228 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-08 02:28:40,228 main DEBUG createAppenders(={Console})
2024-11-08 02:28:40,229 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized
2024-11-08 02:28:40,230 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-11-08 02:28:40,230 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK.
2024-11-08 02:28:40,230 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-08 02:28:40,231 main DEBUG OutputStream closed
2024-11-08 02:28:40,231 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-08 02:28:40,231 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-08 02:28:40,231 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK
2024-11-08 02:28:40,301 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-08 02:28:40,303 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-08 02:28:40,304 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-08 02:28:40,305 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-08 02:28:40,305 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-08 02:28:40,306 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-08 02:28:40,306 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-08 02:28:40,307 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-08 02:28:40,307 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-08 02:28:40,307 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-08 02:28:40,308 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-08 02:28:40,308 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-08 02:28:40,308 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-08 02:28:40,309 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-08 02:28:40,309 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-08 02:28:40,309 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-08 02:28:40,309 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-08 02:28:40,310 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-08 02:28:40,312 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-08 02:28:40,312 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null
2024-11-08 02:28:40,313 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-08 02:28:40,313 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK.
2024-11-08T02:28:40,326 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins
2024-11-08 02:28:40,328 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-08 02:28:40,329 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-08T02:28:40,543 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783
2024-11-08T02:28:40,569 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/cluster_bf7ecdbe-5d4f-e873-f842-da2208887abf, deleteOnExit=true
2024-11-08T02:28:40,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/test.cache.data in system properties and HBase conf
2024-11-08T02:28:40,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/hadoop.tmp.dir in system properties and HBase conf
2024-11-08T02:28:40,573 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/hadoop.log.dir in system properties and HBase conf
2024-11-08T02:28:40,574 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-08T02:28:40,574 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-08T02:28:40,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-08T02:28:40,686 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-08T02:28:40,805 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-08T02:28:40,811 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-08T02:28:40,812 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-08T02:28:40,813 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-08T02:28:40,814 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-08T02:28:40,814 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-08T02:28:40,815 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-08T02:28:40,816 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-08T02:28:40,817 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-08T02:28:40,817 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-08T02:28:40,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/nfs.dump.dir in system properties and HBase conf
2024-11-08T02:28:40,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/java.io.tmpdir in system properties and HBase conf
2024-11-08T02:28:40,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-08T02:28:40,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-08T02:28:40,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-08T02:28:42,050 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-08T02:28:42,119 INFO [Time-limited test {}] log.Log(170): Logging initialized @2581ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-08T02:28:42,187 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-08T02:28:42,244 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-08T02:28:42,264 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-08T02:28:42,264 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-08T02:28:42,265 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-08T02:28:42,276 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-08T02:28:42,279 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@760c69c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/hadoop.log.dir/,AVAILABLE}
2024-11-08T02:28:42,280 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ce709a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-08T02:28:42,447 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62d6efd9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/java.io.tmpdir/jetty-localhost-39639-hadoop-hdfs-3_4_1-tests_jar-_-any-11268845485141136190/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-08T02:28:42,454 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@353d35a1{HTTP/1.1, (http/1.1)}{localhost:39639}
2024-11-08T02:28:42,454 INFO [Time-limited test {}] server.Server(415): Started @2916ms
2024-11-08T02:28:43,017 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-08T02:28:43,024 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-08T02:28:43,025 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-08T02:28:43,025 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-08T02:28:43,025 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-08T02:28:43,026 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a5de9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/hadoop.log.dir/,AVAILABLE}
2024-11-08T02:28:43,026 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69893329{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-08T02:28:43,123 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1b97a472{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/java.io.tmpdir/jetty-localhost-41727-hadoop-hdfs-3_4_1-tests_jar-_-any-12037752036281633681/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-08T02:28:43,123 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3722a29b{HTTP/1.1, (http/1.1)}{localhost:41727}
2024-11-08T02:28:43,124 INFO [Time-limited test {}] server.Server(415): Started @3586ms
2024-11-08T02:28:43,170 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-08T02:28:43,270 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-08T02:28:43,276 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-08T02:28:43,280 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-08T02:28:43,281 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-08T02:28:43,281 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-08T02:28:43,282 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@510fec09{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/hadoop.log.dir/,AVAILABLE}
2024-11-08T02:28:43,282 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40eb7053{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-08T02:28:43,384 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@353955e9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/java.io.tmpdir/jetty-localhost-36767-hadoop-hdfs-3_4_1-tests_jar-_-any-9241244438221956169/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-08T02:28:43,384 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11738cd8{HTTP/1.1, (http/1.1)}{localhost:36767}
2024-11-08T02:28:43,385 INFO [Time-limited test {}] server.Server(415): Started @3847ms
2024-11-08T02:28:43,387 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-08T02:28:43,442 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-08T02:28:43,449 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-08T02:28:43,451 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-08T02:28:43,451 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-08T02:28:43,451 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-08T02:28:43,457 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16cd567f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/hadoop.log.dir/,AVAILABLE}
2024-11-08T02:28:43,459 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5822645a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-08T02:28:43,555 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3114ae69{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/java.io.tmpdir/jetty-localhost-45815-hadoop-hdfs-3_4_1-tests_jar-_-any-1780558207341342688/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-08T02:28:43,556 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c70a874{HTTP/1.1, (http/1.1)}{localhost:45815}
2024-11-08T02:28:43,556 INFO [Time-limited test {}] server.Server(415): Started @4018ms
2024-11-08T02:28:43,558 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-08T02:28:44,766 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/cluster_bf7ecdbe-5d4f-e873-f842-da2208887abf/data/data3/current/BP-309047652-172.17.0.2-1731032921345/current, will proceed with Du for space computation calculation,
2024-11-08T02:28:44,766 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/cluster_bf7ecdbe-5d4f-e873-f842-da2208887abf/data/data1/current/BP-309047652-172.17.0.2-1731032921345/current, will proceed with Du for space computation calculation,
2024-11-08T02:28:44,766 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/cluster_bf7ecdbe-5d4f-e873-f842-da2208887abf/data/data4/current/BP-309047652-172.17.0.2-1731032921345/current, will proceed with Du for space computation calculation,
2024-11-08T02:28:44,766 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/cluster_bf7ecdbe-5d4f-e873-f842-da2208887abf/data/data2/current/BP-309047652-172.17.0.2-1731032921345/current, will proceed with Du for space computation calculation,
2024-11-08T02:28:44,798 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-08T02:28:44,798 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-08T02:28:44,842 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/cluster_bf7ecdbe-5d4f-e873-f842-da2208887abf/data/data5/current/BP-309047652-172.17.0.2-1731032921345/current, will proceed with Du for space computation calculation,
2024-11-08T02:28:44,842 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/cluster_bf7ecdbe-5d4f-e873-f842-da2208887abf/data/data6/current/BP-309047652-172.17.0.2-1731032921345/current, will proceed with Du for space computation calculation,
2024-11-08T02:28:44,845 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xad050b86cc3194c1 with lease ID 0x6a58bf1a38c7bdba: Processing first storage report for DS-a3afc5fb-c723-4357-8648-97c21e154d70 from datanode DatanodeRegistration(127.0.0.1:42169, datanodeUuid=6e393b64-2798-4512-9c6d-48dace9da3b8, infoPort=45927, infoSecurePort=0, ipcPort=37041, storageInfo=lv=-57;cid=testClusterID;nsid=1583422316;c=1731032921345)
2024-11-08T02:28:44,847 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xad050b86cc3194c1 with lease ID 0x6a58bf1a38c7bdba: from storage DS-a3afc5fb-c723-4357-8648-97c21e154d70 node DatanodeRegistration(127.0.0.1:42169, datanodeUuid=6e393b64-2798-4512-9c6d-48dace9da3b8, infoPort=45927, infoSecurePort=0, ipcPort=37041, storageInfo=lv=-57;cid=testClusterID;nsid=1583422316;c=1731032921345), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-08T02:28:44,847 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7f7a9678911f4454 with lease ID 0x6a58bf1a38c7bdbb: Processing first storage report for DS-421283ef-8ea1-48ec-9a96-f6be689657d3 from datanode DatanodeRegistration(127.0.0.1:39163, datanodeUuid=676964a8-a1d3-4b61-a58b-b9d6a169f768, infoPort=34965, infoSecurePort=0, ipcPort=36319, storageInfo=lv=-57;cid=testClusterID;nsid=1583422316;c=1731032921345)
2024-11-08T02:28:44,848 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7f7a9678911f4454 with lease ID 0x6a58bf1a38c7bdbb: from storage DS-421283ef-8ea1-48ec-9a96-f6be689657d3 node DatanodeRegistration(127.0.0.1:39163, datanodeUuid=676964a8-a1d3-4b61-a58b-b9d6a169f768, infoPort=34965, infoSecurePort=0, ipcPort=36319, storageInfo=lv=-57;cid=testClusterID;nsid=1583422316;c=1731032921345), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-08T02:28:44,848 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xad050b86cc3194c1 with lease ID 0x6a58bf1a38c7bdba: Processing first storage report for DS-9cc05715-d96b-46a8-bf68-abbc7b134ada from datanode DatanodeRegistration(127.0.0.1:42169, datanodeUuid=6e393b64-2798-4512-9c6d-48dace9da3b8, infoPort=45927, infoSecurePort=0, ipcPort=37041, storageInfo=lv=-57;cid=testClusterID;nsid=1583422316;c=1731032921345)
2024-11-08T02:28:44,848 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xad050b86cc3194c1 with lease ID 0x6a58bf1a38c7bdba: from storage DS-9cc05715-d96b-46a8-bf68-abbc7b134ada node DatanodeRegistration(127.0.0.1:42169, datanodeUuid=6e393b64-2798-4512-9c6d-48dace9da3b8, infoPort=45927, infoSecurePort=0, ipcPort=37041, storageInfo=lv=-57;cid=testClusterID;nsid=1583422316;c=1731032921345), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-08T02:28:44,848 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7f7a9678911f4454 with lease ID 0x6a58bf1a38c7bdbb: Processing first storage report for DS-d41f6e1d-a3ae-402d-b2d2-02e2d2df4943 from datanode DatanodeRegistration(127.0.0.1:39163, datanodeUuid=676964a8-a1d3-4b61-a58b-b9d6a169f768, infoPort=34965, infoSecurePort=0, ipcPort=36319, storageInfo=lv=-57;cid=testClusterID;nsid=1583422316;c=1731032921345)
2024-11-08T02:28:44,849 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7f7a9678911f4454 with lease ID 0x6a58bf1a38c7bdbb: from storage DS-d41f6e1d-a3ae-402d-b2d2-02e2d2df4943 node DatanodeRegistration(127.0.0.1:39163, datanodeUuid=676964a8-a1d3-4b61-a58b-b9d6a169f768, infoPort=34965, infoSecurePort=0, ipcPort=36319, storageInfo=lv=-57;cid=testClusterID;nsid=1583422316;c=1731032921345), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-08T02:28:44,867 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-08T02:28:44,872 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3425bb3becd34dd with lease ID 0x6a58bf1a38c7bdbc: Processing first storage report for DS-cc5b2ba5-28e7-4656-8e94-6a28ac8b2aa3 from datanode DatanodeRegistration(127.0.0.1:41719, datanodeUuid=34b6d3b3-2205-44a8-87db-43f6551f6d3f, infoPort=46277, infoSecurePort=0, ipcPort=39397, storageInfo=lv=-57;cid=testClusterID;nsid=1583422316;c=1731032921345)
2024-11-08T02:28:44,872 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3425bb3becd34dd with lease ID 0x6a58bf1a38c7bdbc: from storage DS-cc5b2ba5-28e7-4656-8e94-6a28ac8b2aa3 node DatanodeRegistration(127.0.0.1:41719, datanodeUuid=34b6d3b3-2205-44a8-87db-43f6551f6d3f, infoPort=46277, infoSecurePort=0, ipcPort=39397, storageInfo=lv=-57;cid=testClusterID;nsid=1583422316;c=1731032921345), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-08T02:28:44,873 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3425bb3becd34dd with lease ID 0x6a58bf1a38c7bdbc: Processing first storage report for DS-068925cc-1e0d-450b-9846-fd5ff187f120 from datanode DatanodeRegistration(127.0.0.1:41719, datanodeUuid=34b6d3b3-2205-44a8-87db-43f6551f6d3f, infoPort=46277, infoSecurePort=0, ipcPort=39397, storageInfo=lv=-57;cid=testClusterID;nsid=1583422316;c=1731032921345)
2024-11-08T02:28:44,873 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3425bb3becd34dd with lease ID 0x6a58bf1a38c7bdbc: from storage DS-068925cc-1e0d-450b-9846-fd5ff187f120 node DatanodeRegistration(127.0.0.1:41719, datanodeUuid=34b6d3b3-2205-44a8-87db-43f6551f6d3f, infoPort=46277, infoSecurePort=0, ipcPort=39397, storageInfo=lv=-57;cid=testClusterID;nsid=1583422316;c=1731032921345), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-08T02:28:44,962 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783
2024-11-08T02:28:45,033 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable
2024-11-08T02:28:45,093 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=161, OpenFileDescriptor=393, MaxFileDescriptor=1048576, SystemLoadAverage=390, ProcessCount=11, AvailableMemoryMB=7602
2024-11-08T02:28:45,095 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-08T02:28:45,103 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-11-08T02:28:45,197 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/cluster_bf7ecdbe-5d4f-e873-f842-da2208887abf/zookeeper_0, clientPort=58803, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/cluster_bf7ecdbe-5d4f-e873-f842-da2208887abf/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/cluster_bf7ecdbe-5d4f-e873-f842-da2208887abf/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-08T02:28:45,215 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58803
2024-11-08T02:28:45,224 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-08T02:28:45,227 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-08T02:28:45,302 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-08T02:28:45,302 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-08T02:28:45,341 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1077523476_22 at /127.0.0.1:41652 [Receiving block BP-309047652-172.17.0.2-1731032921345:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:41719:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41652 dst: /127.0.0.1:41719
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-08T02:28:45,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41719 is added to blk_-9223372036854775792_1002 (size=7)
2024-11-08T02:28:45,762 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
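The warnings and the DataXceiver error above are the interesting part of this log: the RS-3-2-1024k erasure-coding policy stripes every block group into 3 data + 2 parity = 5 internal blocks, but the mini cluster only registered 3 datanodes, so the parity blocks (indices 3 and 4) cannot be placed and the striped writer reports the block group at risk. The sketch below is a minimal, hypothetical illustration of the kind of setup that produces this situation. It is not the actual TestHBaseWalOnEC code; it only reuses names that appear in the log (HBaseTestingUtil, StartMiniClusterOption, RS-3-2-1024k) plus Hadoop's DistributedFileSystem erasure-coding calls, and helpers such as getDFSCluster() are assumptions about the test utility's API.

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class EcMiniClusterSketch {

  // Hypothetical helper: bring up a mini cluster shaped like the one in this
  // log and enable RS-3-2-1024k on the root directory.
  static HBaseTestingUtil startEcCluster() throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Mirrors the logged option:
    // StartMiniClusterOption{numMasters=1, numRegionServers=3, numDataNodes=3, ...}
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .build();
    util.startMiniCluster(option);

    // RS-3-2-1024k needs 3 data + 2 parity = 5 internal blocks per block
    // group; with only 3 datanodes the two parity blocks have nowhere to go,
    // which is exactly what the DFSStripedOutputStream warnings above report.
    // getDFSCluster() is assumed here; the real test wiring may differ.
    DistributedFileSystem dfs = util.getDFSCluster().getFileSystem();
    dfs.enableErasureCodingPolicy("RS-3-2-1024k");
    dfs.setErasureCodingPolicy(new Path("/"), "RS-3-2-1024k");

    return util;
  }
}
```

On a real cluster, the `hdfs ec -verifyClusterSetup` command mentioned in the warning reports whether the topology can satisfy the enabled policies; for RS-3-2 that means at least five datanodes.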
2024-11-08T02:28:45,781 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a with version=8
2024-11-08T02:28:45,782 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/hbase-staging
2024-11-08T02:28:45,864 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-08T02:28:46,118 INFO [Time-limited test {}] client.ConnectionUtils(128): master/331c7316141f:0 server-side Connection retries=45
2024-11-08T02:28:46,127 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-08T02:28:46,128 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-08T02:28:46,132 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-08T02:28:46,133 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-08T02:28:46,133 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-08T02:28:46,265 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-08T02:28:46,323 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-08T02:28:46,331 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-08T02:28:46,334 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-08T02:28:46,357 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 86472 (auto-detected)
2024-11-08T02:28:46,358 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-08T02:28:46,375 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37595
2024-11-08T02:28:46,394 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37595 connecting to ZooKeeper ensemble=127.0.0.1:58803
2024-11-08T02:28:46,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:375950x0, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-08T02:28:46,524 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37595-0x10118463fa90000 connected
2024-11-08T02:28:46,614 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-08T02:28:46,618 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-08T02:28:46,628 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-08T02:28:46,633 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a, hbase.cluster.distributed=false
2024-11-08T02:28:46,659 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-08T02:28:46,664 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37595
2024-11-08T02:28:46,664 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37595
2024-11-08T02:28:46,665 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37595
2024-11-08T02:28:46,665 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37595
2024-11-08T02:28:46,665 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37595
2024-11-08T02:28:46,753 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/331c7316141f:0 server-side Connection retries=45
2024-11-08T02:28:46,755 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-08T02:28:46,755 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-08T02:28:46,755 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-08T02:28:46,755 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-08T02:28:46,755 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-08T02:28:46,758 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-08T02:28:46,760 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-08T02:28:46,761 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42943
2024-11-08T02:28:46,763 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42943 connecting to ZooKeeper ensemble=127.0.0.1:58803
2024-11-08T02:28:46,764 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-08T02:28:46,768 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-08T02:28:46,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:429430x0, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-08T02:28:46,780 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:429430x0, quorum=127.0.0.1:58803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-08T02:28:46,780 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42943-0x10118463fa90001 connected
2024-11-08T02:28:46,784 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-08T02:28:46,790 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-08T02:28:46,793 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42943-0x10118463fa90001, quorum=127.0.0.1:58803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-08T02:28:46,798 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42943-0x10118463fa90001, quorum=127.0.0.1:58803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-08T02:28:46,798 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42943
2024-11-08T02:28:46,799 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42943
2024-11-08T02:28:46,800 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42943
2024-11-08T02:28:46,801 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42943
2024-11-08T02:28:46,801 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42943
2024-11-08T02:28:46,818 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/331c7316141f:0 server-side Connection retries=45
2024-11-08T02:28:46,818 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-08T02:28:46,818 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-08T02:28:46,819 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-08T02:28:46,819 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-08T02:28:46,819 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-08T02:28:46,820 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-08T02:28:46,820 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-08T02:28:46,821 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41241
2024-11-08T02:28:46,823 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41241 connecting to ZooKeeper ensemble=127.0.0.1:58803
2024-11-08T02:28:46,825 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-08T02:28:46,830 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-08T02:28:46,843 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:412410x0, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-08T02:28:46,844 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41241-0x10118463fa90002 connected
2024-11-08T02:28:46,844 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41241-0x10118463fa90002, quorum=127.0.0.1:58803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-08T02:28:46,845 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-08T02:28:46,846 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-08T02:28:46,847 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41241-0x10118463fa90002, quorum=127.0.0.1:58803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-08T02:28:46,850 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41241-0x10118463fa90002, quorum=127.0.0.1:58803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-08T02:28:46,850 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41241
2024-11-08T02:28:46,851 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41241
2024-11-08T02:28:46,852 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41241
2024-11-08T02:28:46,853 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41241
2024-11-08T02:28:46,853 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41241
2024-11-08T02:28:46,868 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/331c7316141f:0 server-side Connection retries=45
2024-11-08T02:28:46,869 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-08T02:28:46,869 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-08T02:28:46,869 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-08T02:28:46,869 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-08T02:28:46,869 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-08T02:28:46,869 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-08T02:28:46,870 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-08T02:28:46,871 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39565
2024-11-08T02:28:46,872 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39565 connecting to ZooKeeper ensemble=127.0.0.1:58803
2024-11-08T02:28:46,874 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-08T02:28:46,876 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-08T02:28:46,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:395650x0, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-08T02:28:46,885 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39565-0x10118463fa90003 connected
2024-11-08T02:28:46,886 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-08T02:28:46,886 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-08T02:28:46,887 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-08T02:28:46,888 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-08T02:28:46,890 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-08T02:28:46,890 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39565
2024-11-08T02:28:46,891 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39565
2024-11-08T02:28:46,892 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39565
2024-11-08T02:28:46,892 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39565
2024-11-08T02:28:46,893 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39565
2024-11-08T02:28:46,910 DEBUG [M:0;331c7316141f:37595 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;331c7316141f:37595
2024-11-08T02:28:46,912 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/331c7316141f,37595,1731032925968
2024-11-08T02:28:46,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-08T02:28:46,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-08T02:28:46,927 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41241-0x10118463fa90002, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-08T02:28:46,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42943-0x10118463fa90001, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-08T02:28:46,930 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/331c7316141f,37595,1731032925968
2024-11-08T02:28:47,030 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41241-0x10118463fa90002, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-08T02:28:47,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42943-0x10118463fa90001, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-08T02:28:47,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-08T02:28:47,031 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609):
regionserver:41241-0x10118463fa90002, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:47,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42943-0x10118463fa90001, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:47,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:47,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:47,034 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-08T02:28:47,037 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/331c7316141f,37595,1731032925968 from backup master directory 2024-11-08T02:28:47,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/331c7316141f,37595,1731032925968 2024-11-08T02:28:47,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T02:28:47,085 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41241-0x10118463fa90002, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T02:28:47,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42943-0x10118463fa90001, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T02:28:47,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T02:28:47,087 WARN [master/331c7316141f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-08T02:28:47,087 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=331c7316141f,37595,1731032925968 2024-11-08T02:28:47,090 INFO [master/331c7316141f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-08T02:28:47,092 INFO [master/331c7316141f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-08T02:28:47,151 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/hbase.id] with ID: a323f0c7-64b9-42d2-9480-11b2a58afac8 2024-11-08T02:28:47,151 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/.tmp/hbase.id 2024-11-08T02:28:47,158 WARN [master/331c7316141f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:47,158 WARN [master/331c7316141f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:47,162 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1077523476_22 at /127.0.0.1:41670 [Receiving block BP-309047652-172.17.0.2-1731032921345:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:41719:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41670 dst: /127.0.0.1:41719 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-08T02:28:47,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41719 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-08T02:28:47,170 WARN [master/331c7316141f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T02:28:47,170 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/.tmp/hbase.id]:[hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/hbase.id] 2024-11-08T02:28:47,212 INFO [master/331c7316141f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T02:28:47,216 INFO [master/331c7316141f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-08T02:28:47,233 INFO [master/331c7316141f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-11-08T02:28:47,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:47,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42943-0x10118463fa90001, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:47,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:47,263 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41241-0x10118463fa90002, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:47,277 WARN [master/331c7316141f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:47,277 WARN [master/331c7316141f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:47,280 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1077523476_22 at /127.0.0.1:41688 [Receiving block BP-309047652-172.17.0.2-1731032921345:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:41719:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41688 dst: /127.0.0.1:41719 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T02:28:47,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41719 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-08T02:28:47,287 WARN [master/331c7316141f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T02:28:47,303 INFO [master/331c7316141f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T02:28:47,305 INFO [master/331c7316141f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-08T02:28:47,310 INFO [master/331c7316141f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-08T02:28:47,336 WARN [master/331c7316141f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, 
policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:47,336 WARN [master/331c7316141f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:47,339 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1077523476_22 at /127.0.0.1:41706 [Receiving block BP-309047652-172.17.0.2-1731032921345:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:41719:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41706 dst: /127.0.0.1:41719 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T02:28:47,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41719 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-08T02:28:47,345 WARN [master/331c7316141f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-08T02:28:47,360 INFO [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/data/master/store 2024-11-08T02:28:47,375 WARN [master/331c7316141f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:47,375 WARN [master/331c7316141f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:47,378 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1077523476_22 at /127.0.0.1:54804 [Receiving block BP-309047652-172.17.0.2-1731032921345:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54804 dst: /127.0.0.1:39163 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T02:28:47,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-08T02:28:47,384 WARN [master/331c7316141f:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T02:28:47,388 INFO [master/331c7316141f:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-08T02:28:47,391 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T02:28:47,392 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T02:28:47,392 INFO [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T02:28:47,392 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T02:28:47,394 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 1 ms 2024-11-08T02:28:47,394 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T02:28:47,394 INFO [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-08T02:28:47,395 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731032927392Disabling compacts and flushes for region at 1731032927392Disabling writes for close at 1731032927394 (+2 ms)Writing region close event to WAL at 1731032927394Closed at 1731032927394 2024-11-08T02:28:47,397 WARN [master/331c7316141f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/data/master/store/.initializing 2024-11-08T02:28:47,398 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/WALs/331c7316141f,37595,1731032925968 2024-11-08T02:28:47,405 INFO [master/331c7316141f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-08T02:28:47,419 INFO [master/331c7316141f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=331c7316141f%2C37595%2C1731032925968, suffix=, logDir=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/WALs/331c7316141f,37595,1731032925968, archiveDir=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/oldWALs, maxLogs=10 2024-11-08T02:28:47,444 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/WALs/331c7316141f,37595,1731032925968/331c7316141f%2C37595%2C1731032925968.1731032927423, exclude list is [], retry=0 2024-11-08T02:28:47,461 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:414) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:473) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:468) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T02:28:47,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42169,DS-a3afc5fb-c723-4357-8648-97c21e154d70,DISK] 2024-11-08T02:28:47,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39163,DS-421283ef-8ea1-48ec-9a96-f6be689657d3,DISK] 2024-11-08T02:28:47,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41719,DS-cc5b2ba5-28e7-4656-8e94-6a28ac8b2aa3,DISK] 2024-11-08T02:28:47,465 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-08T02:28:47,501 INFO [master/331c7316141f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/WALs/331c7316141f,37595,1731032925968/331c7316141f%2C37595%2C1731032925968.1731032927423 2024-11-08T02:28:47,502 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45927:45927),(127.0.0.1/127.0.0.1:46277:46277),(127.0.0.1/127.0.0.1:34965:34965)] 2024-11-08T02:28:47,503 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-08T02:28:47,503 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T02:28:47,507 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:47,508 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:47,542 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:47,563 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-08T02:28:47,566 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:47,569 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T02:28:47,569 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:47,572 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-08T02:28:47,572 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:47,574 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T02:28:47,574 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:47,577 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-08T02:28:47,577 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:47,578 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T02:28:47,578 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:47,581 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-08T02:28:47,581 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:47,582 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T02:28:47,582 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:47,585 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:47,586 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:47,591 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:47,591 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:47,595 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T02:28:47,598 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:47,604 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T02:28:47,605 INFO [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62759542, jitterRate=-0.06480994820594788}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T02:28:47,612 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731032927520Initializing all the Stores at 1731032927522 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731032927523 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731032927523Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731032927523Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731032927524 (+1 ms)Cleaning up temporary data from old regions at 1731032927591 (+67 ms)Region opened successfully at 1731032927612 (+21 ms) 2024-11-08T02:28:47,613 INFO [master/331c7316141f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-08T02:28:47,643 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@555799b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=331c7316141f/172.17.0.2:0 2024-11-08T02:28:47,673 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-08T02:28:47,683 INFO [master/331c7316141f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-08T02:28:47,683 INFO [master/331c7316141f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-08T02:28:47,686 INFO [master/331c7316141f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-08T02:28:47,688 INFO [master/331c7316141f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-08T02:28:47,692 INFO [master/331c7316141f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 3 msec 2024-11-08T02:28:47,692 INFO [master/331c7316141f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-08T02:28:47,714 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-08T02:28:47,721 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-08T02:28:47,779 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-08T02:28:47,783 INFO [master/331c7316141f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-08T02:28:47,785 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-08T02:28:47,790 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-08T02:28:47,793 INFO [master/331c7316141f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-08T02:28:47,796 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-08T02:28:47,800 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-08T02:28:47,802 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-08T02:28:47,811 DEBUG [master/331c7316141f:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-08T02:28:47,837 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-08T02:28:47,842 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-08T02:28:47,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_-9223372036854775789_1002 (size=7) 2024-11-08T02:28:47,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-08T02:28:47,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T02:28:47,853 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41241-0x10118463fa90002, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T02:28:47,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T02:28:47,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:47,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42943-0x10118463fa90001, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T02:28:47,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42943-0x10118463fa90001, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:47,854 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41241-0x10118463fa90002, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:47,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:47,857 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=331c7316141f,37595,1731032925968, sessionid=0x10118463fa90000, setting cluster-up flag (Was=false) 2024-11-08T02:28:47,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:47,885 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41241-0x10118463fa90002, 
quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:47,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:47,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42943-0x10118463fa90001, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:47,917 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-08T02:28:47,922 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=331c7316141f,37595,1731032925968 2024-11-08T02:28:47,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:47,948 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41241-0x10118463fa90002, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:47,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42943-0x10118463fa90001, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:47,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:47,979 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-08T02:28:47,982 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=331c7316141f,37595,1731032925968 2024-11-08T02:28:47,989 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-08T02:28:47,997 INFO [RS:1;331c7316141f:41241 {}] regionserver.HRegionServer(746): ClusterId : a323f0c7-64b9-42d2-9480-11b2a58afac8 2024-11-08T02:28:47,997 INFO [RS:2;331c7316141f:39565 {}] regionserver.HRegionServer(746): ClusterId : a323f0c7-64b9-42d2-9480-11b2a58afac8 2024-11-08T02:28:47,997 INFO [RS:0;331c7316141f:42943 {}] regionserver.HRegionServer(746): ClusterId : a323f0c7-64b9-42d2-9480-11b2a58afac8 2024-11-08T02:28:48,000 DEBUG [RS:1;331c7316141f:41241 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T02:28:48,000 DEBUG [RS:2;331c7316141f:39565 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T02:28:48,000 DEBUG 
[RS:0;331c7316141f:42943 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T02:28:48,023 DEBUG [RS:1;331c7316141f:41241 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T02:28:48,023 DEBUG [RS:0;331c7316141f:42943 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T02:28:48,023 DEBUG [RS:2;331c7316141f:39565 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T02:28:48,024 DEBUG [RS:1;331c7316141f:41241 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T02:28:48,024 DEBUG [RS:0;331c7316141f:42943 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T02:28:48,024 DEBUG [RS:2;331c7316141f:39565 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T02:28:48,043 DEBUG [RS:1;331c7316141f:41241 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T02:28:48,043 DEBUG [RS:0;331c7316141f:42943 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T02:28:48,043 DEBUG [RS:2;331c7316141f:39565 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T02:28:48,044 DEBUG [RS:1;331c7316141f:41241 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f129a2a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=331c7316141f/172.17.0.2:0 2024-11-08T02:28:48,044 DEBUG [RS:0;331c7316141f:42943 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b586d08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=331c7316141f/172.17.0.2:0 2024-11-08T02:28:48,044 DEBUG [RS:2;331c7316141f:39565 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@345a1cb4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=331c7316141f/172.17.0.2:0 2024-11-08T02:28:48,061 DEBUG [RS:1;331c7316141f:41241 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;331c7316141f:41241 2024-11-08T02:28:48,062 DEBUG [RS:2;331c7316141f:39565 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;331c7316141f:39565 2024-11-08T02:28:48,062 DEBUG [RS:0;331c7316141f:42943 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;331c7316141f:42943 2024-11-08T02:28:48,065 INFO [RS:2;331c7316141f:39565 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T02:28:48,065 INFO [RS:0;331c7316141f:42943 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T02:28:48,065 INFO [RS:1;331c7316141f:41241 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T02:28:48,065 INFO [RS:2;331c7316141f:39565 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T02:28:48,065 
INFO [RS:0;331c7316141f:42943 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T02:28:48,065 INFO [RS:1;331c7316141f:41241 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T02:28:48,065 DEBUG [RS:2;331c7316141f:39565 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-08T02:28:48,065 DEBUG [RS:0;331c7316141f:42943 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-08T02:28:48,065 DEBUG [RS:1;331c7316141f:41241 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-08T02:28:48,068 INFO [RS:0;331c7316141f:42943 {}] regionserver.HRegionServer(2659): reportForDuty to master=331c7316141f,37595,1731032925968 with port=42943, startcode=1731032926723 2024-11-08T02:28:48,068 INFO [RS:1;331c7316141f:41241 {}] regionserver.HRegionServer(2659): reportForDuty to master=331c7316141f,37595,1731032925968 with port=41241, startcode=1731032926817 2024-11-08T02:28:48,068 INFO [RS:2;331c7316141f:39565 {}] regionserver.HRegionServer(2659): reportForDuty to master=331c7316141f,37595,1731032925968 with port=39565, startcode=1731032926868 2024-11-08T02:28:48,071 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-08T02:28:48,081 DEBUG [RS:1;331c7316141f:41241 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T02:28:48,081 DEBUG [RS:2;331c7316141f:39565 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T02:28:48,081 DEBUG [RS:0;331c7316141f:42943 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T02:28:48,082 INFO [master/331c7316141f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-08T02:28:48,089 INFO [master/331c7316141f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
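The StochasticLoadBalancer line above reports the tuning knobs it loaded (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000 ms) plus its cost-function list. These values come from the usual HBase configuration chain; the sketch below reads them back with the public client Configuration API. The key names follow the hbase.master.balancer.stochastic.* convention and are an assumption to verify against the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuning {
        public static void main(String[] args) {
            // Loads hbase-default.xml / hbase-site.xml from the classpath.
            Configuration conf = HBaseConfiguration.create();

            // Key names are assumptions (hbase.master.balancer.stochastic.* convention);
            // the defaults below mirror the values reported in the log line above.
            int maxSteps        = conf.getInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
            int stepsPerRegion  = conf.getInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            long maxRunningTime = conf.getLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);

            System.out.printf("maxSteps=%d stepsPerRegion=%d maxRunningTime=%dms%n",
                    maxSteps, stepsPerRegion, maxRunningTime);
        }
    }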
2024-11-08T02:28:48,094 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 331c7316141f,37595,1731032925968 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-08T02:28:48,101 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/331c7316141f:0, corePoolSize=5, maxPoolSize=5 2024-11-08T02:28:48,102 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/331c7316141f:0, corePoolSize=5, maxPoolSize=5 2024-11-08T02:28:48,102 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/331c7316141f:0, corePoolSize=5, maxPoolSize=5 2024-11-08T02:28:48,102 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/331c7316141f:0, corePoolSize=5, maxPoolSize=5 2024-11-08T02:28:48,102 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/331c7316141f:0, corePoolSize=10, maxPoolSize=10 2024-11-08T02:28:48,102 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,102 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/331c7316141f:0, corePoolSize=2, maxPoolSize=2 2024-11-08T02:28:48,102 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,111 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T02:28:48,111 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-08T02:28:48,117 INFO [master/331c7316141f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731032958117 2024-11-08T02:28:48,119 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42149, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T02:28:48,119 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54961, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T02:28:48,119 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52903, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T02:28:48,119 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner 
pool size is 1 2024-11-08T02:28:48,120 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-08T02:28:48,121 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:48,122 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-08T02:28:48,125 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-08T02:28:48,125 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-08T02:28:48,125 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37595 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-08T02:28:48,125 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-08T02:28:48,126 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-08T02:28:48,126 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,131 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37595 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-08T02:28:48,131 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-08T02:28:48,132 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37595 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-08T02:28:48,132 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-08T02:28:48,133 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-08T02:28:48,134 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:48,134 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:48,135 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-08T02:28:48,135 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-08T02:28:48,137 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/331c7316141f:0:becomeActiveMaster-HFileCleaner.large.0-1731032928136,5,FailOnTimeoutGroup] 2024-11-08T02:28:48,138 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/331c7316141f:0:becomeActiveMaster-HFileCleaner.small.0-1731032928137,5,FailOnTimeoutGroup] 2024-11-08T02:28:48,138 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,138 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-08T02:28:48,139 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,139 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
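The ServerNotRunningYetException traces above are region servers calling regionServerStartup before the master has finished becoming active; a little further down they log "reportForDuty failed; sleeping 100 ms and then retrying" and succeed on the next attempt. The sketch below is not HBase's implementation, just a minimal illustration of that fixed-backoff retry pattern with hypothetical names.

    import java.util.concurrent.Callable;

    public final class FixedBackoffRetry {
        /** Retries the call with a fixed sleep until it succeeds or attempts run out. */
        public static <T> T retry(Callable<T> call, int maxAttempts, long sleepMs) throws Exception {
            if (maxAttempts < 1) throw new IllegalArgumentException("maxAttempts must be >= 1");
            Exception last = null;
            for (int attempt = 1; attempt <= maxAttempts; attempt++) {
                try {
                    return call.call();
                } catch (Exception e) {      // e.g. "server is not running yet"
                    last = e;
                    Thread.sleep(sleepMs);   // the servers in this log pause 100 ms
                }
            }
            throw last;
        }
    }

A caller would wrap its own RPC, e.g. retry(() -> reportForDuty(master), 50, 100L), where reportForDuty is the caller's method (hypothetical), not an HBase API. In this log the second reportForDuty attempt, about 100 ms later at 02:28:48,255, is accepted and the servers are registered.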
2024-11-08T02:28:48,140 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1077523476_22 at /127.0.0.1:54842 [Receiving block BP-309047652-172.17.0.2-1731032921345:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:39163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54842 dst: /127.0.0.1:39163 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T02:28:48,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_-9223372036854775712_1013 (size=1321) 2024-11-08T02:28:48,149 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
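The DataXceiver "Premature EOF" error and the "Cannot allocate parity block (policy=RS-3-2-1024k)" and "Block group <1> failed to write 2 blocks" warnings are consistent with striped writes under an erasure-coding policy that wants 3 data + 2 parity = 5 datanodes, while the addStoredBlock entries in this log only mention three datanode ports. The log itself points at `hdfs ec -verifyClusterSetup`; the sketch below checks the effective policy on a directory from Java using the Hadoop 3.x DistributedFileSystem API, with a hypothetical path.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class CheckEcPolicy {
        public static void main(String[] args) throws Exception {
            // fs.defaultFS should point at the test NameNode, e.g. hdfs://localhost:39361 in this run.
            Configuration conf = new Configuration();
            try (FileSystem fs = FileSystem.get(conf)) {
                DistributedFileSystem dfs = (DistributedFileSystem) fs;

                // Hypothetical path; substitute the directory the test writes under.
                Path dir = new Path("/user/jenkins/test-data");

                ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
                if (policy != null) {
                    // For RS-3-2-1024k this prints 3 data units and 2 parity units,
                    // i.e. at least 5 datanodes are needed for a healthy block group.
                    System.out.printf("policy=%s data=%d parity=%d%n",
                            policy.getName(), policy.getNumDataUnits(), policy.getNumParityUnits());
                } else {
                    System.out.println("directory uses plain replication");
                }
            }
        }
    }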
2024-11-08T02:28:48,150 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-08T02:28:48,151 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a 2024-11-08T02:28:48,153 DEBUG [RS:0;331c7316141f:42943 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-08T02:28:48,153 DEBUG [RS:1;331c7316141f:41241 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-08T02:28:48,153 DEBUG [RS:2;331c7316141f:39565 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-08T02:28:48,153 WARN [RS:0;331c7316141f:42943 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-08T02:28:48,153 WARN [RS:1;331c7316141f:41241 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-08T02:28:48,153 WARN [RS:2;331c7316141f:39565 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-08T02:28:48,162 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:48,162 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-08T02:28:48,165 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1077523476_22 at /127.0.0.1:41746 [Receiving block BP-309047652-172.17.0.2-1731032921345:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:41719:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41746 dst: /127.0.0.1:41719 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T02:28:48,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41719 is added to blk_-9223372036854775696_1015 (size=32) 2024-11-08T02:28:48,173 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
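The hbase:meta descriptor dumped above (families info, ns, rep_barrier and table with ROWCOL bloom filters, ROW_INDEX_V1 encoding, IN_MEMORY and 8 KB blocks) is what the StoreOpener entries below instantiate one HStore per family from. For a user table, a comparable descriptor can be built with the public client API; the table name below is a hypothetical example, not something from this log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorExample {
        public static TableDescriptor build() {
            // Mirrors the attributes shown for the 'info' family of hbase:meta above.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setInMemory(true)
                    .setBlocksize(8 * 1024)
                    .build();

            // "example:table" is a hypothetical name.
            return TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("example", "table"))
                    .setColumnFamily(info)
                    .build();
        }
    }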
2024-11-08T02:28:48,174 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T02:28:48,180 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T02:28:48,183 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T02:28:48,183 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:48,184 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T02:28:48,184 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T02:28:48,187 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T02:28:48,188 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:48,189 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T02:28:48,189 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T02:28:48,192 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T02:28:48,192 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:48,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T02:28:48,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T02:28:48,196 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T02:28:48,196 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:48,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T02:28:48,197 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T02:28:48,199 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740 2024-11-08T02:28:48,200 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740 2024-11-08T02:28:48,203 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal 
replay for 1588230740 2024-11-08T02:28:48,203 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T02:28:48,204 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T02:28:48,207 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T02:28:48,217 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T02:28:48,219 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73698860, jitterRate=0.09819859266281128}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T02:28:48,224 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731032928175Initializing all the Stores at 1731032928176 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731032928176Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731032928179 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731032928179Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731032928179Cleaning up temporary data from old regions at 1731032928203 (+24 ms)Region opened successfully at 1731032928224 (+21 ms) 2024-11-08T02:28:48,224 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T02:28:48,224 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T02:28:48,225 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T02:28:48,225 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T02:28:48,225 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region 
hbase:meta,,1.1588230740 2024-11-08T02:28:48,227 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T02:28:48,227 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731032928224Disabling compacts and flushes for region at 1731032928224Disabling writes for close at 1731032928225 (+1 ms)Writing region close event to WAL at 1731032928226 (+1 ms)Closed at 1731032928226 2024-11-08T02:28:48,231 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T02:28:48,231 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-08T02:28:48,237 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-08T02:28:48,245 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T02:28:48,249 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-08T02:28:48,255 INFO [RS:0;331c7316141f:42943 {}] regionserver.HRegionServer(2659): reportForDuty to master=331c7316141f,37595,1731032925968 with port=42943, startcode=1731032926723 2024-11-08T02:28:48,255 INFO [RS:1;331c7316141f:41241 {}] regionserver.HRegionServer(2659): reportForDuty to master=331c7316141f,37595,1731032925968 with port=41241, startcode=1731032926817 2024-11-08T02:28:48,255 INFO [RS:2;331c7316141f:39565 {}] regionserver.HRegionServer(2659): reportForDuty to master=331c7316141f,37595,1731032925968 with port=39565, startcode=1731032926868 2024-11-08T02:28:48,256 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37595 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 331c7316141f,41241,1731032926817 2024-11-08T02:28:48,259 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37595 {}] master.ServerManager(517): Registering regionserver=331c7316141f,41241,1731032926817 2024-11-08T02:28:48,266 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37595 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 331c7316141f,39565,1731032926868 2024-11-08T02:28:48,266 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37595 {}] master.ServerManager(517): Registering regionserver=331c7316141f,39565,1731032926868 2024-11-08T02:28:48,266 DEBUG [RS:1;331c7316141f:41241 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a 2024-11-08T02:28:48,266 DEBUG [RS:1;331c7316141f:41241 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39361 2024-11-08T02:28:48,266 DEBUG [RS:1;331c7316141f:41241 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T02:28:48,268 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37595 {}] 
master.ServerManager(363): Checking decommissioned status of RegionServer 331c7316141f,42943,1731032926723 2024-11-08T02:28:48,269 DEBUG [RS:2;331c7316141f:39565 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a 2024-11-08T02:28:48,269 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37595 {}] master.ServerManager(517): Registering regionserver=331c7316141f,42943,1731032926723 2024-11-08T02:28:48,269 DEBUG [RS:2;331c7316141f:39565 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39361 2024-11-08T02:28:48,269 DEBUG [RS:2;331c7316141f:39565 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T02:28:48,272 DEBUG [RS:0;331c7316141f:42943 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a 2024-11-08T02:28:48,272 DEBUG [RS:0;331c7316141f:42943 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39361 2024-11-08T02:28:48,272 DEBUG [RS:0;331c7316141f:42943 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T02:28:48,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T02:28:48,317 DEBUG [RS:2;331c7316141f:39565 {}] zookeeper.ZKUtil(111): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/331c7316141f,39565,1731032926868 2024-11-08T02:28:48,317 WARN [RS:2;331c7316141f:39565 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T02:28:48,318 DEBUG [RS:1;331c7316141f:41241 {}] zookeeper.ZKUtil(111): regionserver:41241-0x10118463fa90002, quorum=127.0.0.1:58803, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/331c7316141f,41241,1731032926817 2024-11-08T02:28:48,318 DEBUG [RS:0;331c7316141f:42943 {}] zookeeper.ZKUtil(111): regionserver:42943-0x10118463fa90001, quorum=127.0.0.1:58803, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/331c7316141f,42943,1731032926723 2024-11-08T02:28:48,318 WARN [RS:1;331c7316141f:41241 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T02:28:48,318 INFO [RS:2;331c7316141f:39565 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-08T02:28:48,318 WARN [RS:0;331c7316141f:42943 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
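The ZKUtil/ZKWatcher entries above show each region server registering an ephemeral znode under /hbase/rs and watching it, while the master's RegionServerTracker (below) reacts to NodeChildrenChanged events as servers appear; the HBASE_ZNODE_FILE warning only means the start scripts cannot proactively clear that ephemeral node after a crash, so recovery has to wait for the ZooKeeper session to expire. A minimal sketch of the same pattern with the plain ZooKeeper client follows; the connect string matches the quorum in this log, everything else is illustrative and assumes /hbase/rs already exists.

    import java.util.List;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class RsTrackerSketch {
        public static void main(String[] args) throws Exception {
            Watcher watcher = (WatchedEvent e) ->
                    System.out.println("event " + e.getType() + " on " + e.getPath());

            ZooKeeper zk = new ZooKeeper("127.0.0.1:58803", 30_000, watcher);

            // Ephemeral child, like /hbase/rs/<host>,<port>,<startcode>; it vanishes when the session dies.
            zk.create("/hbase/rs/example-server", new byte[0],
                    ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

            // Watching the parent makes NodeChildrenChanged fire as servers join or leave.
            List<String> servers = zk.getChildren("/hbase/rs", true);
            System.out.println("live region servers: " + servers);

            zk.close();
        }
    }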
2024-11-08T02:28:48,318 INFO [RS:1;331c7316141f:41241 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-08T02:28:48,318 INFO [RS:0;331c7316141f:42943 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-08T02:28:48,318 DEBUG [RS:2;331c7316141f:39565 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/WALs/331c7316141f,39565,1731032926868 2024-11-08T02:28:48,318 DEBUG [RS:1;331c7316141f:41241 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/WALs/331c7316141f,41241,1731032926817 2024-11-08T02:28:48,318 DEBUG [RS:0;331c7316141f:42943 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/WALs/331c7316141f,42943,1731032926723 2024-11-08T02:28:48,320 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [331c7316141f,39565,1731032926868] 2024-11-08T02:28:48,321 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [331c7316141f,42943,1731032926723] 2024-11-08T02:28:48,321 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [331c7316141f,41241,1731032926817] 2024-11-08T02:28:48,344 INFO [RS:0;331c7316141f:42943 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T02:28:48,344 INFO [RS:2;331c7316141f:39565 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T02:28:48,344 INFO [RS:1;331c7316141f:41241 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T02:28:48,361 INFO [RS:1;331c7316141f:41241 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T02:28:48,361 INFO [RS:0;331c7316141f:42943 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T02:28:48,361 INFO [RS:2;331c7316141f:39565 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T02:28:48,367 INFO [RS:0;331c7316141f:42943 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T02:28:48,367 INFO [RS:2;331c7316141f:39565 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T02:28:48,367 INFO [RS:1;331c7316141f:41241 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T02:28:48,367 INFO [RS:0;331c7316141f:42943 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-08T02:28:48,367 INFO [RS:2;331c7316141f:39565 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,367 INFO [RS:1;331c7316141f:41241 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,368 INFO [RS:0;331c7316141f:42943 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T02:28:48,371 INFO [RS:2;331c7316141f:39565 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T02:28:48,371 INFO [RS:1;331c7316141f:41241 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T02:28:48,373 INFO [RS:0;331c7316141f:42943 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T02:28:48,373 INFO [RS:2;331c7316141f:39565 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T02:28:48,373 INFO [RS:1;331c7316141f:41241 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T02:28:48,375 INFO [RS:2;331c7316141f:39565 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,375 INFO [RS:0;331c7316141f:42943 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,375 INFO [RS:1;331c7316141f:41241 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,375 DEBUG [RS:2;331c7316141f:39565 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,375 DEBUG [RS:0;331c7316141f:42943 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,375 DEBUG [RS:1;331c7316141f:41241 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,375 DEBUG [RS:2;331c7316141f:39565 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,375 DEBUG [RS:0;331c7316141f:42943 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,375 DEBUG [RS:1;331c7316141f:41241 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,375 DEBUG [RS:2;331c7316141f:39565 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,375 DEBUG [RS:0;331c7316141f:42943 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,375 DEBUG [RS:1;331c7316141f:41241 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,375 DEBUG [RS:2;331c7316141f:39565 {}] 
executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,375 DEBUG [RS:0;331c7316141f:42943 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,375 DEBUG [RS:0;331c7316141f:42943 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,375 DEBUG [RS:2;331c7316141f:39565 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,375 DEBUG [RS:1;331c7316141f:41241 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,375 DEBUG [RS:0;331c7316141f:42943 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/331c7316141f:0, corePoolSize=2, maxPoolSize=2 2024-11-08T02:28:48,375 DEBUG [RS:2;331c7316141f:39565 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/331c7316141f:0, corePoolSize=2, maxPoolSize=2 2024-11-08T02:28:48,375 DEBUG [RS:1;331c7316141f:41241 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,376 DEBUG [RS:0;331c7316141f:42943 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,376 DEBUG [RS:2;331c7316141f:39565 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,376 DEBUG [RS:1;331c7316141f:41241 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/331c7316141f:0, corePoolSize=2, maxPoolSize=2 2024-11-08T02:28:48,376 DEBUG [RS:0;331c7316141f:42943 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,376 DEBUG [RS:2;331c7316141f:39565 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,376 DEBUG [RS:0;331c7316141f:42943 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,376 DEBUG [RS:2;331c7316141f:39565 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,376 DEBUG [RS:1;331c7316141f:41241 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,376 DEBUG [RS:0;331c7316141f:42943 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,376 DEBUG [RS:2;331c7316141f:39565 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 
2024-11-08T02:28:48,376 DEBUG [RS:1;331c7316141f:41241 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,376 DEBUG [RS:0;331c7316141f:42943 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,376 DEBUG [RS:2;331c7316141f:39565 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,376 DEBUG [RS:0;331c7316141f:42943 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,376 DEBUG [RS:1;331c7316141f:41241 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,376 DEBUG [RS:2;331c7316141f:39565 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,376 DEBUG [RS:0;331c7316141f:42943 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/331c7316141f:0, corePoolSize=3, maxPoolSize=3 2024-11-08T02:28:48,376 DEBUG [RS:2;331c7316141f:39565 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/331c7316141f:0, corePoolSize=3, maxPoolSize=3 2024-11-08T02:28:48,376 DEBUG [RS:1;331c7316141f:41241 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,376 DEBUG [RS:0;331c7316141f:42943 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0, corePoolSize=3, maxPoolSize=3 2024-11-08T02:28:48,376 DEBUG [RS:2;331c7316141f:39565 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0, corePoolSize=3, maxPoolSize=3 2024-11-08T02:28:48,376 DEBUG [RS:1;331c7316141f:41241 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,377 DEBUG [RS:1;331c7316141f:41241 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:48,377 DEBUG [RS:1;331c7316141f:41241 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/331c7316141f:0, corePoolSize=3, maxPoolSize=3 2024-11-08T02:28:48,377 DEBUG [RS:1;331c7316141f:41241 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0, corePoolSize=3, maxPoolSize=3 2024-11-08T02:28:48,378 INFO [RS:2;331c7316141f:39565 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,378 INFO [RS:2;331c7316141f:39565 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
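The ExecutorService entries above start one bounded thread pool per event type on each region server, with corePoolSize equal to maxPoolSize (1 for RS_OPEN_REGION, 2 for RS_LOG_REPLAY_OPS, 3 for RS_SNAPSHOT_OPERATIONS and RS_FLUSH_OPERATIONS). The equivalent shape in plain java.util.concurrent looks like the sketch below; the pool names are illustrative only.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class EventPools {
        /** Fixed-size pool with an unbounded queue, i.e. corePoolSize == maxPoolSize as above. */
        static ExecutorService fixedPool(int size) {
            return new ThreadPoolExecutor(size, size, 60L, TimeUnit.SECONDS,
                    new LinkedBlockingQueue<>());
        }

        public static void main(String[] args) {
            ExecutorService openRegion = fixedPool(1);   // RS_OPEN_REGION: core=max=1
            ExecutorService logReplay  = fixedPool(2);   // RS_LOG_REPLAY_OPS: core=max=2
            ExecutorService snapshots  = fixedPool(3);   // RS_SNAPSHOT_OPERATIONS: core=max=3

            openRegion.submit(() -> System.out.println("open region task"));

            openRegion.shutdown();
            logReplay.shutdown();
            snapshots.shutdown();
        }
    }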
2024-11-08T02:28:48,378 INFO [RS:2;331c7316141f:39565 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,378 INFO [RS:2;331c7316141f:39565 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,379 INFO [RS:2;331c7316141f:39565 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,379 INFO [RS:2;331c7316141f:39565 {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,39565,1731032926868-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T02:28:48,380 INFO [RS:0;331c7316141f:42943 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,380 INFO [RS:1;331c7316141f:41241 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,380 INFO [RS:0;331c7316141f:42943 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,380 INFO [RS:1;331c7316141f:41241 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,380 INFO [RS:0;331c7316141f:42943 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,380 INFO [RS:1;331c7316141f:41241 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,380 INFO [RS:0;331c7316141f:42943 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,380 INFO [RS:1;331c7316141f:41241 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,380 INFO [RS:1;331c7316141f:41241 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,380 INFO [RS:0;331c7316141f:42943 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,380 INFO [RS:1;331c7316141f:41241 {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,41241,1731032926817-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T02:28:48,380 INFO [RS:0;331c7316141f:42943 {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,42943,1731032926723-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T02:28:48,397 INFO [RS:1;331c7316141f:41241 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T02:28:48,399 INFO [RS:2;331c7316141f:39565 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T02:28:48,399 INFO [RS:1;331c7316141f:41241 {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,41241,1731032926817-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,399 INFO [RS:2;331c7316141f:39565 {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,39565,1731032926868-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
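Each ScheduledChore above runs at a fixed period: CompactionChecker and MemstoreFlusherChore every 1000 ms, ExecutorStatusChore every 60000 ms, nonceCleaner every 360000 ms, BrokenStoreFileCleaner every 21600000 ms, MobFileCleanerChore every 86400 s. A hedged sketch of the same fixed-period scheduling with a plain ScheduledExecutorService; the chore names and periods come from the log, the driver class is illustrative.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreScheduling {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService chores = Executors.newScheduledThreadPool(2);

        // Periods as reported in the log above.
        chores.scheduleAtFixedRate(() -> System.out.println("CompactionChecker tick"),
                0, 1_000, TimeUnit.MILLISECONDS);
        chores.scheduleAtFixedRate(() -> System.out.println("ExecutorStatusChore tick"),
                0, 60_000, TimeUnit.MILLISECONDS);

        TimeUnit.SECONDS.sleep(3);   // let a few CompactionChecker ticks fire
        chores.shutdownNow();
    }
}
```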
2024-11-08T02:28:48,400 INFO [RS:1;331c7316141f:41241 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,400 INFO [RS:2;331c7316141f:39565 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,400 INFO [RS:1;331c7316141f:41241 {}] regionserver.Replication(171): 331c7316141f,41241,1731032926817 started 2024-11-08T02:28:48,400 INFO [RS:2;331c7316141f:39565 {}] regionserver.Replication(171): 331c7316141f,39565,1731032926868 started 2024-11-08T02:28:48,400 WARN [331c7316141f:37595 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-08T02:28:48,407 INFO [RS:0;331c7316141f:42943 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T02:28:48,407 INFO [RS:0;331c7316141f:42943 {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,42943,1731032926723-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,407 INFO [RS:0;331c7316141f:42943 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,407 INFO [RS:0;331c7316141f:42943 {}] regionserver.Replication(171): 331c7316141f,42943,1731032926723 started 2024-11-08T02:28:48,418 INFO [RS:1;331c7316141f:41241 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,418 INFO [RS:1;331c7316141f:41241 {}] regionserver.HRegionServer(1482): Serving as 331c7316141f,41241,1731032926817, RpcServer on 331c7316141f/172.17.0.2:41241, sessionid=0x10118463fa90002 2024-11-08T02:28:48,419 DEBUG [RS:1;331c7316141f:41241 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T02:28:48,419 DEBUG [RS:1;331c7316141f:41241 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 331c7316141f,41241,1731032926817 2024-11-08T02:28:48,419 DEBUG [RS:1;331c7316141f:41241 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '331c7316141f,41241,1731032926817' 2024-11-08T02:28:48,420 DEBUG [RS:1;331c7316141f:41241 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T02:28:48,420 DEBUG [RS:1;331c7316141f:41241 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T02:28:48,421 DEBUG [RS:1;331c7316141f:41241 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T02:28:48,421 DEBUG [RS:1;331c7316141f:41241 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T02:28:48,421 DEBUG [RS:1;331c7316141f:41241 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 331c7316141f,41241,1731032926817 2024-11-08T02:28:48,421 DEBUG [RS:1;331c7316141f:41241 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '331c7316141f,41241,1731032926817' 2024-11-08T02:28:48,421 DEBUG [RS:1;331c7316141f:41241 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T02:28:48,422 DEBUG [RS:1;331c7316141f:41241 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under 
znode:'/hbase/online-snapshot/acquired' 2024-11-08T02:28:48,422 INFO [RS:0;331c7316141f:42943 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:48,422 INFO [RS:0;331c7316141f:42943 {}] regionserver.HRegionServer(1482): Serving as 331c7316141f,42943,1731032926723, RpcServer on 331c7316141f/172.17.0.2:42943, sessionid=0x10118463fa90001 2024-11-08T02:28:48,422 DEBUG [RS:1;331c7316141f:41241 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T02:28:48,422 DEBUG [RS:0;331c7316141f:42943 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T02:28:48,422 INFO [RS:1;331c7316141f:41241 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T02:28:48,422 DEBUG [RS:0;331c7316141f:42943 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 331c7316141f,42943,1731032926723 2024-11-08T02:28:48,422 DEBUG [RS:0;331c7316141f:42943 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '331c7316141f,42943,1731032926723' 2024-11-08T02:28:48,422 INFO [RS:1;331c7316141f:41241 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-08T02:28:48,422 DEBUG [RS:0;331c7316141f:42943 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T02:28:48,423 DEBUG [RS:0;331c7316141f:42943 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T02:28:48,423 INFO [RS:2;331c7316141f:39565 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-08T02:28:48,423 INFO [RS:2;331c7316141f:39565 {}] regionserver.HRegionServer(1482): Serving as 331c7316141f,39565,1731032926868, RpcServer on 331c7316141f/172.17.0.2:39565, sessionid=0x10118463fa90003 2024-11-08T02:28:48,423 DEBUG [RS:2;331c7316141f:39565 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T02:28:48,423 DEBUG [RS:0;331c7316141f:42943 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T02:28:48,424 DEBUG [RS:2;331c7316141f:39565 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 331c7316141f,39565,1731032926868 2024-11-08T02:28:48,424 DEBUG [RS:0;331c7316141f:42943 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T02:28:48,424 DEBUG [RS:0;331c7316141f:42943 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 331c7316141f,42943,1731032926723 2024-11-08T02:28:48,424 DEBUG [RS:2;331c7316141f:39565 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '331c7316141f,39565,1731032926868' 2024-11-08T02:28:48,424 DEBUG [RS:0;331c7316141f:42943 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '331c7316141f,42943,1731032926723' 2024-11-08T02:28:48,424 DEBUG [RS:2;331c7316141f:39565 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T02:28:48,424 DEBUG [RS:0;331c7316141f:42943 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T02:28:48,424 DEBUG [RS:0;331c7316141f:42943 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T02:28:48,424 DEBUG [RS:2;331c7316141f:39565 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T02:28:48,425 DEBUG [RS:0;331c7316141f:42943 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T02:28:48,425 INFO [RS:0;331c7316141f:42943 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T02:28:48,425 INFO [RS:0;331c7316141f:42943 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
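The procedure members above initialize in two steps: check the '/hbase/&lt;proc&gt;/abort' znode for aborted procedures, then watch '/hbase/&lt;proc&gt;/acquired' for new ones. A minimal sketch of that sequence with the plain ZooKeeper client; the quorum address 127.0.0.1:58803 and the znode paths appear in this log, while the session timeout, watcher, and error handling are assumptions.

```java
import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ProcedureMemberBootstrap {
    public static void main(String[] args) throws Exception {
        // Quorum address as shown later in this log for the test cluster.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:58803", 30_000, event -> { });

        for (String proc : new String[] {"flush-table-proc", "online-snapshot"}) {
            String abortNode = "/hbase/" + proc + "/abort";
            String acquiredNode = "/hbase/" + proc + "/acquired";

            // 1. Check for procedures that were aborted while this member was offline.
            if (zk.exists(abortNode, false) != null) {
                List<String> aborted = zk.getChildren(abortNode, false);
                System.out.println(proc + " aborted procedures: " + aborted);
            }

            // 2. Look for (and keep watching) newly acquired procedures.
            List<String> acquired = zk.getChildren(acquiredNode, true);
            System.out.println(proc + " acquired procedures: " + acquired);
        }
        zk.close();
    }
}
```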
2024-11-08T02:28:48,425 DEBUG [RS:2;331c7316141f:39565 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T02:28:48,425 DEBUG [RS:2;331c7316141f:39565 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T02:28:48,425 DEBUG [RS:2;331c7316141f:39565 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 331c7316141f,39565,1731032926868 2024-11-08T02:28:48,425 DEBUG [RS:2;331c7316141f:39565 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '331c7316141f,39565,1731032926868' 2024-11-08T02:28:48,425 DEBUG [RS:2;331c7316141f:39565 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T02:28:48,426 DEBUG [RS:2;331c7316141f:39565 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T02:28:48,426 DEBUG [RS:2;331c7316141f:39565 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T02:28:48,426 INFO [RS:2;331c7316141f:39565 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T02:28:48,426 INFO [RS:2;331c7316141f:39565 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-08T02:28:48,529 INFO [RS:0;331c7316141f:42943 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-08T02:28:48,529 INFO [RS:1;331c7316141f:41241 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-08T02:28:48,529 INFO [RS:2;331c7316141f:39565 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-08T02:28:48,533 INFO [RS:1;331c7316141f:41241 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=331c7316141f%2C41241%2C1731032926817, suffix=, logDir=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/WALs/331c7316141f,41241,1731032926817, archiveDir=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/oldWALs, maxLogs=32 2024-11-08T02:28:48,533 INFO [RS:2;331c7316141f:39565 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=331c7316141f%2C39565%2C1731032926868, suffix=, logDir=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/WALs/331c7316141f,39565,1731032926868, archiveDir=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/oldWALs, maxLogs=32 2024-11-08T02:28:48,533 INFO [RS:0;331c7316141f:42943 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=331c7316141f%2C42943%2C1731032926723, suffix=, logDir=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/WALs/331c7316141f,42943,1731032926723, archiveDir=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/oldWALs, maxLogs=32 2024-11-08T02:28:48,549 DEBUG [RS:2;331c7316141f:39565 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/WALs/331c7316141f,39565,1731032926868/331c7316141f%2C39565%2C1731032926868.1731032928537, exclude list is [], retry=0 2024-11-08T02:28:48,551 DEBUG [RS:0;331c7316141f:42943 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for 
/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/WALs/331c7316141f,42943,1731032926723/331c7316141f%2C42943%2C1731032926723.1731032928537, exclude list is [], retry=0 2024-11-08T02:28:48,554 DEBUG [RS:1;331c7316141f:41241 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/WALs/331c7316141f,41241,1731032926817/331c7316141f%2C41241%2C1731032926817.1731032928537, exclude list is [], retry=0 2024-11-08T02:28:48,554 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39163,DS-421283ef-8ea1-48ec-9a96-f6be689657d3,DISK] 2024-11-08T02:28:48,555 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41719,DS-cc5b2ba5-28e7-4656-8e94-6a28ac8b2aa3,DISK] 2024-11-08T02:28:48,555 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42169,DS-a3afc5fb-c723-4357-8648-97c21e154d70,DISK] 2024-11-08T02:28:48,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39163,DS-421283ef-8ea1-48ec-9a96-f6be689657d3,DISK] 2024-11-08T02:28:48,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41719,DS-cc5b2ba5-28e7-4656-8e94-6a28ac8b2aa3,DISK] 2024-11-08T02:28:48,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42169,DS-a3afc5fb-c723-4357-8648-97c21e154d70,DISK] 2024-11-08T02:28:48,586 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42169,DS-a3afc5fb-c723-4357-8648-97c21e154d70,DISK] 2024-11-08T02:28:48,586 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39163,DS-421283ef-8ea1-48ec-9a96-f6be689657d3,DISK] 2024-11-08T02:28:48,587 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41719,DS-cc5b2ba5-28e7-4656-8e94-6a28ac8b2aa3,DISK] 2024-11-08T02:28:48,589 INFO [RS:2;331c7316141f:39565 {}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/WALs/331c7316141f,39565,1731032926868/331c7316141f%2C39565%2C1731032926868.1731032928537 2024-11-08T02:28:48,592 DEBUG [RS:2;331c7316141f:39565 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46277:46277),(127.0.0.1/127.0.0.1:34965:34965),(127.0.0.1/127.0.0.1:45927:45927)] 2024-11-08T02:28:48,593 INFO [RS:0;331c7316141f:42943 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/WALs/331c7316141f,42943,1731032926723/331c7316141f%2C42943%2C1731032926723.1731032928537 2024-11-08T02:28:48,594 DEBUG [RS:0;331c7316141f:42943 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46277:46277),(127.0.0.1/127.0.0.1:34965:34965),(127.0.0.1/127.0.0.1:45927:45927)] 2024-11-08T02:28:48,595 INFO [RS:1;331c7316141f:41241 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/WALs/331c7316141f,41241,1731032926817/331c7316141f%2C41241%2C1731032926817.1731032928537 2024-11-08T02:28:48,596 DEBUG [RS:1;331c7316141f:41241 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46277:46277),(127.0.0.1/127.0.0.1:45927:45927),(127.0.0.1/127.0.0.1:34965:34965)] 2024-11-08T02:28:48,652 DEBUG [331c7316141f:37595 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-08T02:28:48,662 DEBUG [331c7316141f:37595 {}] balancer.BalancerClusterState(204): Hosts are {331c7316141f=0} racks are {/default-rack=0} 2024-11-08T02:28:48,670 DEBUG [331c7316141f:37595 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-08T02:28:48,670 DEBUG [331c7316141f:37595 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-08T02:28:48,670 DEBUG [331c7316141f:37595 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-08T02:28:48,670 DEBUG [331c7316141f:37595 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-08T02:28:48,670 DEBUG [331c7316141f:37595 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-08T02:28:48,670 DEBUG [331c7316141f:37595 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-08T02:28:48,670 INFO [331c7316141f:37595 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-08T02:28:48,671 INFO [331c7316141f:37595 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-08T02:28:48,671 INFO [331c7316141f:37595 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-08T02:28:48,671 DEBUG [331c7316141f:37595 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-08T02:28:48,679 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=331c7316141f,39565,1731032926868 2024-11-08T02:28:48,685 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 331c7316141f,39565,1731032926868, state=OPENING 2024-11-08T02:28:48,705 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-08T02:28:48,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42943-0x10118463fa90001, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
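The WAL writers created above are configured with blocksize=256 MB, rollsize=128 MB and maxLogs=32; the roll size is exactly half the block size. A hedged sketch of the decisions those numbers imply (roll the current WAL once it reaches rollsize; once more than maxLogs files accumulate, the oldest edits typically have to be flushed so WALs can be archived). The thresholds are from the log; the class and method names are illustrative.

```java
public class WalRollCheck {
    static final long BLOCK_SIZE = 256L * 1024 * 1024;   // blocksize=256 MB (from the log)
    static final long ROLL_SIZE  = BLOCK_SIZE / 2;        // rollsize=128 MB (from the log)
    static final int  MAX_LOGS   = 32;                    // maxLogs=32 (from the log)

    /** Roll the current WAL file once it has reached the roll size. */
    static boolean shouldRoll(long currentWalLength) {
        return currentWalLength >= ROLL_SIZE;
    }

    /** Too many un-archived WALs: memstores holding the oldest edits should be flushed. */
    static boolean tooManyWals(int walFileCount) {
        return walFileCount > MAX_LOGS;
    }

    public static void main(String[] args) {
        System.out.println(shouldRoll(130L * 1024 * 1024)); // true: 130 MB >= 128 MB
        System.out.println(tooManyWals(33));                // true: 33 > 32
    }
}
```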
2024-11-08T02:28:48,716 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41241-0x10118463fa90002, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:48,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:48,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:48,717 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T02:28:48,717 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T02:28:48,717 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T02:28:48,718 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T02:28:48,719 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T02:28:48,721 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=331c7316141f,39565,1731032926868}] 2024-11-08T02:28:48,898 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-08T02:28:48,900 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39113, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-08T02:28:48,911 INFO [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-08T02:28:48,912 INFO [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-08T02:28:48,913 INFO [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-08T02:28:48,916 INFO [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=331c7316141f%2C39565%2C1731032926868.meta, suffix=.meta, logDir=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/WALs/331c7316141f,39565,1731032926868, archiveDir=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/oldWALs, maxLogs=32 2024-11-08T02:28:48,936 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create 
output stream for /user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/WALs/331c7316141f,39565,1731032926868/331c7316141f%2C39565%2C1731032926868.meta.1731032928919.meta, exclude list is [], retry=0 2024-11-08T02:28:48,940 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42169,DS-a3afc5fb-c723-4357-8648-97c21e154d70,DISK] 2024-11-08T02:28:48,940 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39163,DS-421283ef-8ea1-48ec-9a96-f6be689657d3,DISK] 2024-11-08T02:28:48,941 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41719,DS-cc5b2ba5-28e7-4656-8e94-6a28ac8b2aa3,DISK] 2024-11-08T02:28:48,944 INFO [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/WALs/331c7316141f,39565,1731032926868/331c7316141f%2C39565%2C1731032926868.meta.1731032928919.meta 2024-11-08T02:28:48,944 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34965:34965),(127.0.0.1/127.0.0.1:45927:45927),(127.0.0.1/127.0.0.1:46277:46277)] 2024-11-08T02:28:48,945 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-08T02:28:48,947 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-08T02:28:48,950 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-08T02:28:48,955 INFO [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
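The meta WAL file name created above is assembled from the server name with its commas percent-encoded (331c7316141f,39565,1731032926868 becomes 331c7316141f%2C39565%2C1731032926868), a creation timestamp, and the ".meta" suffix. A small sketch of that assembly; only the naming pattern and the sample values are taken from the log, the helper itself is hypothetical.

```java
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

public class WalFileName {
    // Reassembles the WAL name pattern visible in the log:
    //   <encoded server name>[.meta].<timestamp>[.meta]
    static String walName(String host, int port, long startCode, long ts, String suffix) {
        String serverName = host + "," + port + "," + startCode;
        // URLEncoder turns ',' into "%2C", matching the encoded prefix in the log.
        String prefix = URLEncoder.encode(serverName, StandardCharsets.UTF_8);
        return prefix + suffix + "." + ts + suffix;
    }

    public static void main(String[] args) {
        // Values copied from the log lines above.
        System.out.println(walName("331c7316141f", 39565, 1731032926868L, 1731032928919L, ".meta"));
        // -> 331c7316141f%2C39565%2C1731032926868.meta.1731032928919.meta
    }
}
```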
2024-11-08T02:28:48,958 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-08T02:28:48,959 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T02:28:48,959 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-08T02:28:48,959 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-08T02:28:48,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T02:28:48,964 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T02:28:48,964 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:48,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T02:28:48,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T02:28:48,966 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T02:28:48,966 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:48,967 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T02:28:48,967 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T02:28:48,969 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T02:28:48,969 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:48,970 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T02:28:48,970 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T02:28:48,971 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T02:28:48,971 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:48,972 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
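The CompactionConfiguration lines above report minFilesToCompact:3, maxFilesToCompact:10 and ratio 1.2 (5.0 off-peak) for every store of hbase:meta. A hedged sketch of the classic ratio test those parameters feed, where a selection qualifies when no file is larger than ratio times the combined size of the others; this is a simplification for illustration, not a copy of ExploringCompactionPolicy.

```java
import java.util.List;

public class RatioCheck {
    static final double RATIO = 1.2;   // "ratio 1.200000" from the log
    static final int MIN_FILES = 3;    // minFilesToCompact:3
    static final int MAX_FILES = 10;   // maxFilesToCompact:10

    /** True if every file in the selection is <= RATIO * (sum of the other files). */
    static boolean selectionFitsRatio(List<Long> fileSizes) {
        if (fileSizes.size() < MIN_FILES || fileSizes.size() > MAX_FILES) {
            return false;
        }
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > RATIO * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(selectionFitsRatio(List.of(10L, 12L, 11L)));   // true: similar sizes
        System.out.println(selectionFitsRatio(List.of(1000L, 10L, 12L))); // false: 1000 > 1.2 * 22
    }
}
```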
2024-11-08T02:28:48,972 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T02:28:48,973 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740 2024-11-08T02:28:48,976 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740 2024-11-08T02:28:48,978 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T02:28:48,978 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T02:28:48,979 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T02:28:48,981 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T02:28:48,983 INFO [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65302687, jitterRate=-0.026914134621620178}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T02:28:48,983 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-08T02:28:48,984 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731032928959Writing region info on filesystem at 1731032928960 (+1 ms)Initializing all the Stores at 1731032928962 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731032928962Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731032928962Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731032928962Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731032928962Cleaning up temporary data from old regions at 1731032928978 (+16 ms)Running coprocessor post-open hooks at 1731032928983 (+5 ms)Region opened successfully at 1731032928984 (+1 ms) 2024-11-08T02:28:48,992 INFO [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731032928889 2024-11-08T02:28:49,004 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-08T02:28:49,005 INFO [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-08T02:28:49,006 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=331c7316141f,39565,1731032926868 2024-11-08T02:28:49,008 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 331c7316141f,39565,1731032926868, state=OPEN 2024-11-08T02:28:49,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T02:28:49,064 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41241-0x10118463fa90002, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T02:28:49,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42943-0x10118463fa90001, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T02:28:49,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T02:28:49,064 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T02:28:49,064 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T02:28:49,064 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T02:28:49,064 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T02:28:49,064 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, 
state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=331c7316141f,39565,1731032926868 2024-11-08T02:28:49,072 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-08T02:28:49,072 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=331c7316141f,39565,1731032926868 in 345 msec 2024-11-08T02:28:49,080 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-08T02:28:49,080 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 838 msec 2024-11-08T02:28:49,083 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T02:28:49,083 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-08T02:28:49,100 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T02:28:49,102 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=331c7316141f,39565,1731032926868, seqNum=-1] 2024-11-08T02:28:49,138 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T02:28:49,141 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45135, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T02:28:49,171 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1470 sec 2024-11-08T02:28:49,172 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731032929171, completionTime=-1 2024-11-08T02:28:49,175 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-08T02:28:49,175 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
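In the lines above, pid=3 (OpenRegionProcedure) finishes in 345 msec, which lets its parent pid=2 (the hbase:meta ASSIGN) complete in 838 msec, which in turn lets pid=1 (InitMetaProcedure) finish in 1.1470 sec. A small sketch of that parent-waits-for-subprocedure nesting using a CountDownLatch; the procedure names and nesting come from the log, the latch-based structure is only an illustration of the pattern, not the ProcedureExecutor.

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class NestedProcedures {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService workers = Executors.newFixedThreadPool(2);
        CountDownLatch openRegionDone = new CountDownLatch(1);   // pid=3
        CountDownLatch assignDone = new CountDownLatch(1);       // pid=2

        // pid=3: OpenRegionProcedure is the leaf piece of work.
        workers.execute(() -> {
            System.out.println("pid=3 OpenRegionProcedure finished");
            openRegionDone.countDown();
        });

        // pid=2: ASSIGN resumes only once its subprocedure has finished.
        workers.execute(() -> {
            try {
                openRegionDone.await();
                System.out.println("pid=2 ASSIGN finished");
                assignDone.countDown();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });

        // pid=1: InitMetaProcedure is the outermost parent.
        assignDone.await();
        System.out.println("pid=1 InitMetaProcedure finished");
        workers.shutdown();
    }
}
```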
2024-11-08T02:28:49,200 INFO [master/331c7316141f:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-08T02:28:49,200 INFO [master/331c7316141f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731032989200 2024-11-08T02:28:49,200 INFO [master/331c7316141f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731033049200 2024-11-08T02:28:49,200 INFO [master/331c7316141f:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 25 msec 2024-11-08T02:28:49,202 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-08T02:28:49,208 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,37595,1731032925968-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:49,209 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,37595,1731032925968-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:49,209 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,37595,1731032925968-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:49,210 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-331c7316141f:37595, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:49,210 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:49,211 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:49,218 DEBUG [master/331c7316141f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-08T02:28:49,240 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.152sec 2024-11-08T02:28:49,242 INFO [master/331c7316141f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-08T02:28:49,243 INFO [master/331c7316141f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-08T02:28:49,244 INFO [master/331c7316141f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-08T02:28:49,244 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-08T02:28:49,245 INFO [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-08T02:28:49,245 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,37595,1731032925968-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T02:28:49,246 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,37595,1731032925968-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-08T02:28:49,250 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-08T02:28:49,251 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-08T02:28:49,252 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,37595,1731032925968-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:49,305 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@106eafae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T02:28:49,309 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-08T02:28:49,309 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-08T02:28:49,312 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 331c7316141f,37595,-1 for getting cluster id 2024-11-08T02:28:49,314 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-08T02:28:49,323 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a323f0c7-64b9-42d2-9480-11b2a58afac8' 2024-11-08T02:28:49,325 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-08T02:28:49,325 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a323f0c7-64b9-42d2-9480-11b2a58afac8" 2024-11-08T02:28:49,326 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@686af684, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T02:28:49,326 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [331c7316141f,37595,-1] 2024-11-08T02:28:49,329 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-08T02:28:49,330 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T02:28:49,332 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41030, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
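The client bootstrap visible above proceeds in a fixed order: ask the ConnectionRegistryService on the master for the cluster id, fetch the hbase:meta region location, then build the service stubs. A hedged sketch of that ordering as plain sequential calls; the timeouts (connectTO=10000, readTO=20000, writeTO=60000), the cluster id, and the meta location are copied from the log, while the Bootstrap class and its stub methods are hypothetical placeholders for the real RPCs.

```java
public class ClientBootstrapOrder {
    // RPC client timeouts as reported in the log (milliseconds).
    static final int CONNECT_TIMEOUT = 10_000;
    static final int READ_TIMEOUT = 20_000;
    static final int WRITE_TIMEOUT = 60_000;

    public static void main(String[] args) {
        // The three steps the log shows, in order; each would be an RPC in the real client.
        String clusterId = fetchClusterId("331c7316141f,37595");      // ConnectionRegistryService
        String metaLocation = fetchMetaRegionLocation(clusterId);      // ClientMetaService
        System.out.println("timeouts: connect=" + CONNECT_TIMEOUT
                + " read=" + READ_TIMEOUT + " write=" + WRITE_TIMEOUT);
        System.out.println("ready: clusterId=" + clusterId + ", meta at " + metaLocation);
    }

    static String fetchClusterId(String masterAddress) {
        // Placeholder for the GetConnectionRegistry call; value copied from the log.
        return "a323f0c7-64b9-42d2-9480-11b2a58afac8";
    }

    static String fetchMetaRegionLocation(String clusterId) {
        // Placeholder for the meta location fetch; value copied from the log.
        return "331c7316141f,39565,1731032926868";
    }
}
```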
2024-11-08T02:28:49,334 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@527aea6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T02:28:49,335 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T02:28:49,341 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=331c7316141f,39565,1731032926868, seqNum=-1] 2024-11-08T02:28:49,342 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T02:28:49,344 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39566, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T02:28:49,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=331c7316141f,37595,1731032925968 2024-11-08T02:28:49,366 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-08T02:28:49,370 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 331c7316141f,37595,1731032925968 2024-11-08T02:28:49,372 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@41f9323d 2024-11-08T02:28:49,373 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-08T02:28:49,375 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41046, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-08T02:28:49,381 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37595 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T02:28:49,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37595 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-08T02:28:49,390 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-08T02:28:49,392 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37595 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-08T02:28:49,392 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:49,394 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-08T02:28:49,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37595 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T02:28:49,402 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:49,402 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:49,409 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1077523476_22 at /127.0.0.1:60102 [Receiving block BP-309047652-172.17.0.2-1731032921345:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:42169:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60102 dst: /127.0.0.1:42169 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T02:28:49,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_-9223372036854775680_1021 (size=392) 2024-11-08T02:28:49,419 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
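The warnings above come from writing with the RS-3-2-1024k erasure coding policy, which needs 3 data + 2 parity = 5 distinct datanodes per block group, while this mini cluster runs only 3 datanodes (127.0.0.1:42169, :39163, :41719), so the parity blocks at indexes 3 and 4 cannot be placed. A tiny sketch of that arithmetic; the policy layout and node count are from the log, the check itself is just an illustration of what 'hdfs ec -verifyClusterSetup' would flag for this topology.

```java
public class EcCapacityCheck {
    public static void main(String[] args) {
        // RS-3-2-1024k: Reed-Solomon with 3 data units and 2 parity units.
        int dataUnits = 3;
        int parityUnits = 2;
        int requiredDataNodes = dataUnits + parityUnits;   // 5

        int liveDataNodes = 3;   // the three datanodes visible in this test log

        System.out.printf("policy needs %d datanodes, cluster has %d -> %s%n",
                requiredDataNodes, liveDataNodes,
                liveDataNodes >= requiredDataNodes
                        ? "all blocks can be placed"
                        : (requiredDataNodes - liveDataNodes) + " block(s) per group cannot be placed");
        // Output: policy needs 5 datanodes, cluster has 3 -> 2 block(s) per group cannot be placed,
        // which matches "Block group <1> failed to write 2 blocks" in the log.
    }
}
```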
2024-11-08T02:28:49,423 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => da5507206b6d5da39fa7e8e2a50d6ed6, NAME => 'TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a 2024-11-08T02:28:49,430 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:49,430 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:49,436 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1077523476_22 at /127.0.0.1:54922 [Receiving block BP-309047652-172.17.0.2-1731032921345:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:39163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54922 dst: /127.0.0.1:39163 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T02:28:49,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_-9223372036854775664_1023 (size=51) 2024-11-08T02:28:49,445 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
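
The repeated "Cannot allocate parity block" warnings are consistent with the RS-3-2-1024k policy: it needs 3 data plus 2 parity blocks, i.e. at least 5 DataNodes per block group, while this mini-cluster appears to run only three DataNodes (127.0.0.1:42169, 127.0.0.1:39163, 127.0.0.1:41719), so the parity slots at indexes 3 and 4 cannot be placed. Besides the 'hdfs ec -verifyClusterSetup' command suggested in the warning, the same check can be sketched in Java against the test NameNode; the fs.defaultFS port and the path are taken from the log above, and the class name is purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

// Prints the erasure coding policy in effect for a directory and how many
// DataNodes a full block group of that policy would need.
public class CheckEcPolicy {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:39361"); // assumed: NameNode port from the log
    try (DistributedFileSystem dfs =
        (DistributedFileSystem) new Path("/").getFileSystem(conf)) {
      Path dir = new Path("/user/jenkins/test-data");
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      if (policy == null) {
        System.out.println(dir + " uses plain replication");
      } else {
        int unitsNeeded = policy.getNumDataUnits() + policy.getNumParityUnits();
        System.out.println(dir + " uses " + policy.getName()
            + "; a full block group needs " + unitsNeeded + " DataNodes");
      }
    }
  }
}
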
2024-11-08T02:28:49,446 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T02:28:49,446 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing da5507206b6d5da39fa7e8e2a50d6ed6, disabling compactions & flushes 2024-11-08T02:28:49,446 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6. 2024-11-08T02:28:49,446 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6. 2024-11-08T02:28:49,446 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6. after waiting 0 ms 2024-11-08T02:28:49,446 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6. 2024-11-08T02:28:49,446 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6. 2024-11-08T02:28:49,447 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for da5507206b6d5da39fa7e8e2a50d6ed6: Waiting for close lock at 1731032929446Disabling compacts and flushes for region at 1731032929446Disabling writes for close at 1731032929446Writing region close event to WAL at 1731032929446Closed at 1731032929446 2024-11-08T02:28:49,449 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-08T02:28:49,454 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731032929449"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731032929449"}]},"ts":"1731032929449"} 2024-11-08T02:28:49,459 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-08T02:28:49,461 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-08T02:28:49,464 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731032929461"}]},"ts":"1731032929461"} 2024-11-08T02:28:49,468 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-08T02:28:49,469 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {331c7316141f=0} racks are {/default-rack=0} 2024-11-08T02:28:49,470 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-08T02:28:49,470 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-08T02:28:49,470 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-08T02:28:49,470 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-08T02:28:49,470 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-08T02:28:49,470 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-08T02:28:49,470 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-08T02:28:49,470 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-08T02:28:49,470 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-08T02:28:49,470 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-08T02:28:49,471 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=da5507206b6d5da39fa7e8e2a50d6ed6, ASSIGN}] 2024-11-08T02:28:49,474 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=da5507206b6d5da39fa7e8e2a50d6ed6, ASSIGN 2024-11-08T02:28:49,476 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=da5507206b6d5da39fa7e8e2a50d6ed6, ASSIGN; state=OFFLINE, location=331c7316141f,39565,1731032926868; forceNewPlan=false, retain=false 2024-11-08T02:28:49,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37595 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T02:28:49,631 INFO [331c7316141f:37595 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-08T02:28:49,632 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=da5507206b6d5da39fa7e8e2a50d6ed6, regionState=OPENING, regionLocation=331c7316141f,39565,1731032926868 2024-11-08T02:28:49,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=da5507206b6d5da39fa7e8e2a50d6ed6, ASSIGN because future has completed 2024-11-08T02:28:49,639 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure da5507206b6d5da39fa7e8e2a50d6ed6, server=331c7316141f,39565,1731032926868}] 2024-11-08T02:28:49,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37595 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T02:28:49,804 INFO [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6. 2024-11-08T02:28:49,805 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => da5507206b6d5da39fa7e8e2a50d6ed6, NAME => 'TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6.', STARTKEY => '', ENDKEY => ''} 2024-11-08T02:28:49,805 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC da5507206b6d5da39fa7e8e2a50d6ed6 2024-11-08T02:28:49,806 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T02:28:49,806 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for da5507206b6d5da39fa7e8e2a50d6ed6 2024-11-08T02:28:49,806 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for da5507206b6d5da39fa7e8e2a50d6ed6 2024-11-08T02:28:49,809 INFO [StoreOpener-da5507206b6d5da39fa7e8e2a50d6ed6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region da5507206b6d5da39fa7e8e2a50d6ed6 2024-11-08T02:28:49,812 INFO [StoreOpener-da5507206b6d5da39fa7e8e2a50d6ed6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region da5507206b6d5da39fa7e8e2a50d6ed6 columnFamilyName cf 2024-11-08T02:28:49,812 DEBUG [StoreOpener-da5507206b6d5da39fa7e8e2a50d6ed6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:49,813 INFO [StoreOpener-da5507206b6d5da39fa7e8e2a50d6ed6-1 {}] regionserver.HStore(327): Store=da5507206b6d5da39fa7e8e2a50d6ed6/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T02:28:49,813 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for da5507206b6d5da39fa7e8e2a50d6ed6 2024-11-08T02:28:49,815 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/default/TestHBaseWalOnEC/da5507206b6d5da39fa7e8e2a50d6ed6 2024-11-08T02:28:49,815 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/default/TestHBaseWalOnEC/da5507206b6d5da39fa7e8e2a50d6ed6 2024-11-08T02:28:49,816 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for da5507206b6d5da39fa7e8e2a50d6ed6 2024-11-08T02:28:49,816 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for da5507206b6d5da39fa7e8e2a50d6ed6 2024-11-08T02:28:49,819 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for da5507206b6d5da39fa7e8e2a50d6ed6 2024-11-08T02:28:49,824 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/default/TestHBaseWalOnEC/da5507206b6d5da39fa7e8e2a50d6ed6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T02:28:49,825 INFO [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened da5507206b6d5da39fa7e8e2a50d6ed6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72387402, jitterRate=0.07865634560585022}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-08T02:28:49,825 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for da5507206b6d5da39fa7e8e2a50d6ed6 2024-11-08T02:28:49,826 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for da5507206b6d5da39fa7e8e2a50d6ed6: Running coprocessor pre-open hook at 1731032929806Writing region info on filesystem at 1731032929806Initializing all the Stores at 1731032929809 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731032929809Cleaning up temporary data from old regions at 1731032929816 (+7 ms)Running coprocessor post-open hooks at 1731032929825 (+9 ms)Region opened successfully at 1731032929826 (+1 ms) 2024-11-08T02:28:49,828 INFO [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6., pid=6, masterSystemTime=1731032929793 2024-11-08T02:28:49,831 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6. 2024-11-08T02:28:49,831 INFO [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6. 2024-11-08T02:28:49,833 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=da5507206b6d5da39fa7e8e2a50d6ed6, regionState=OPEN, openSeqNum=2, regionLocation=331c7316141f,39565,1731032926868 2024-11-08T02:28:49,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure da5507206b6d5da39fa7e8e2a50d6ed6, server=331c7316141f,39565,1731032926868 because future has completed 2024-11-08T02:28:49,843 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-08T02:28:49,843 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure da5507206b6d5da39fa7e8e2a50d6ed6, server=331c7316141f,39565,1731032926868 in 201 msec 2024-11-08T02:28:49,847 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-08T02:28:49,847 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=da5507206b6d5da39fa7e8e2a50d6ed6, ASSIGN in 372 msec 2024-11-08T02:28:49,849 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-08T02:28:49,849 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731032929849"}]},"ts":"1731032929849"} 2024-11-08T02:28:49,852 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-08T02:28:49,854 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-08T02:28:49,857 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 471 msec 
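
The completed pid=4 CreateTableProcedure corresponds to a single createTable call on the client side. Below is a minimal sketch of an equivalent call through the public Admin API; it is not the test's actual source (that lives in TestHBaseWalOnEC.java) and the class name is made up, but the table name, the single 'cf' family and REGION_REPLICATION => '1' match the descriptor logged above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

// Creates a table matching the descriptor in the log: one 'cf' family,
// region replication 1, everything else left at defaults.
public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      admin.createTable(
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build());
    }
  }
}
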
2024-11-08T02:28:50,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37595 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T02:28:50,030 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-08T02:28:50,030 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-08T02:28:50,032 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T02:28:50,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-08T02:28:50,043 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T02:28:50,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-11-08T02:28:50,054 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6., hostname=331c7316141f,39565,1731032926868, seqNum=2] 2024-11-08T02:28:50,064 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37595 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-08T02:28:50,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37595 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-08T02:28:50,071 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-08T02:28:50,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37595 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T02:28:50,073 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-08T02:28:50,074 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-08T02:28:50,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37595 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T02:28:50,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39565 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-08T02:28:50,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6. 
2024-11-08T02:28:50,247 INFO [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing da5507206b6d5da39fa7e8e2a50d6ed6 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-08T02:28:50,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/default/TestHBaseWalOnEC/da5507206b6d5da39fa7e8e2a50d6ed6/.tmp/cf/53fc3b45612645f5a09aed42fd8834b3 is 36, key is row/cf:cq/1731032930057/Put/seqid=0 2024-11-08T02:28:50,312 WARN [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:50,313 WARN [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:50,317 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_642982442_22 at /127.0.0.1:41800 [Receiving block BP-309047652-172.17.0.2-1731032921345:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:41719:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41800 dst: /127.0.0.1:41719 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T02:28:50,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41719 is added to blk_-9223372036854775648_1025 (size=4787) 2024-11-08T02:28:50,323 WARN [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-08T02:28:50,323 INFO [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/default/TestHBaseWalOnEC/da5507206b6d5da39fa7e8e2a50d6ed6/.tmp/cf/53fc3b45612645f5a09aed42fd8834b3 2024-11-08T02:28:50,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/default/TestHBaseWalOnEC/da5507206b6d5da39fa7e8e2a50d6ed6/.tmp/cf/53fc3b45612645f5a09aed42fd8834b3 as hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/default/TestHBaseWalOnEC/da5507206b6d5da39fa7e8e2a50d6ed6/cf/53fc3b45612645f5a09aed42fd8834b3 2024-11-08T02:28:50,372 INFO [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/default/TestHBaseWalOnEC/da5507206b6d5da39fa7e8e2a50d6ed6/cf/53fc3b45612645f5a09aed42fd8834b3, entries=1, sequenceid=5, filesize=4.7 K 2024-11-08T02:28:50,379 INFO [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for da5507206b6d5da39fa7e8e2a50d6ed6 in 131ms, sequenceid=5, compaction requested=false 2024-11-08T02:28:50,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-08T02:28:50,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for da5507206b6d5da39fa7e8e2a50d6ed6: 2024-11-08T02:28:50,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6. 
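
The ~32 B flush above covers a single cell whose key is row/cf:cq (visible in the HFile key logged by HFileWriterImpl). A sketch of an equivalent client-side write followed by a table flush, which is what drives the FlushTableProcedure/FlushRegionProcedure pair seen here; the cell value and the class name are placeholders rather than the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Writes one cell at row/cf:cq and then asks the master to flush the table.
public class PutAndFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(name);
         Admin admin = connection.getAdmin()) {
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      admin.flush(name); // flush request, executed by the master as a table flush procedure
    }
  }
}
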
2024-11-08T02:28:50,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-08T02:28:50,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37595 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-08T02:28:50,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37595 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T02:28:50,391 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-08T02:28:50,392 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 313 msec 2024-11-08T02:28:50,396 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 328 msec 2024-11-08T02:28:50,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37595 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T02:28:50,699 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-08T02:28:50,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-08T02:28:50,714 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-08T02:28:50,714 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at 
org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T02:28:50,719 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T02:28:50,720 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T02:28:50,720 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-08T02:28:50,720 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-08T02:28:50,720 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1441288993, stopped=false 2024-11-08T02:28:50,721 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=331c7316141f,37595,1731032925968 2024-11-08T02:28:50,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T02:28:50,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T02:28:50,780 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41241-0x10118463fa90002, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T02:28:50,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42943-0x10118463fa90001, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T02:28:50,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:50,780 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41241-0x10118463fa90002, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:50,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42943-0x10118463fa90001, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:50,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:50,781 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T02:28:50,781 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-08T02:28:50,782 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T02:28:50,782 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T02:28:50,782 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T02:28:50,782 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41241-0x10118463fa90002, quorum=127.0.0.1:58803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T02:28:50,783 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T02:28:50,783 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42943-0x10118463fa90001, quorum=127.0.0.1:58803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T02:28:50,783 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '331c7316141f,42943,1731032926723' ***** 2024-11-08T02:28:50,783 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T02:28:50,784 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '331c7316141f,41241,1731032926817' ***** 2024-11-08T02:28:50,784 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T02:28:50,784 INFO [RS:1;331c7316141f:41241 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T02:28:50,784 INFO [RS:0;331c7316141f:42943 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T02:28:50,784 INFO [RS:1;331c7316141f:41241 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T02:28:50,784 INFO [RS:0;331c7316141f:42943 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T02:28:50,784 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T02:28:50,784 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T02:28:50,785 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '331c7316141f,39565,1731032926868' ***** 2024-11-08T02:28:50,785 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T02:28:50,785 INFO [RS:1;331c7316141f:41241 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-08T02:28:50,785 INFO [RS:0;331c7316141f:42943 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-08T02:28:50,785 INFO [RS:0;331c7316141f:42943 {}] regionserver.HRegionServer(959): stopping server 331c7316141f,42943,1731032926723 2024-11-08T02:28:50,785 INFO [RS:2;331c7316141f:39565 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T02:28:50,785 INFO [RS:0;331c7316141f:42943 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T02:28:50,785 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T02:28:50,785 INFO [RS:1;331c7316141f:41241 {}] regionserver.HRegionServer(959): stopping server 331c7316141f,41241,1731032926817 2024-11-08T02:28:50,785 INFO [RS:0;331c7316141f:42943 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;331c7316141f:42943. 2024-11-08T02:28:50,785 INFO [RS:2;331c7316141f:39565 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T02:28:50,785 INFO [RS:1;331c7316141f:41241 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T02:28:50,785 INFO [RS:2;331c7316141f:39565 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-08T02:28:50,785 DEBUG [RS:0;331c7316141f:42943 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T02:28:50,785 INFO [RS:1;331c7316141f:41241 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;331c7316141f:41241. 
2024-11-08T02:28:50,785 DEBUG [RS:0;331c7316141f:42943 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T02:28:50,785 DEBUG [RS:1;331c7316141f:41241 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T02:28:50,786 INFO [RS:2;331c7316141f:39565 {}] regionserver.HRegionServer(3091): Received CLOSE for da5507206b6d5da39fa7e8e2a50d6ed6 2024-11-08T02:28:50,786 DEBUG [RS:1;331c7316141f:41241 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T02:28:50,786 INFO [RS:0;331c7316141f:42943 {}] regionserver.HRegionServer(976): stopping server 331c7316141f,42943,1731032926723; all regions closed. 2024-11-08T02:28:50,786 INFO [RS:1;331c7316141f:41241 {}] regionserver.HRegionServer(976): stopping server 331c7316141f,41241,1731032926817; all regions closed. 2024-11-08T02:28:50,786 INFO [RS:2;331c7316141f:39565 {}] regionserver.HRegionServer(959): stopping server 331c7316141f,39565,1731032926868 2024-11-08T02:28:50,786 INFO [RS:2;331c7316141f:39565 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T02:28:50,786 INFO [RS:2;331c7316141f:39565 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;331c7316141f:39565. 
2024-11-08T02:28:50,786 DEBUG [RS:2;331c7316141f:39565 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T02:28:50,787 DEBUG [RS:2;331c7316141f:39565 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T02:28:50,787 INFO [regionserver/331c7316141f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T02:28:50,787 INFO [RS:2;331c7316141f:39565 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T02:28:50,787 DEBUG [RS_CLOSE_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing da5507206b6d5da39fa7e8e2a50d6ed6, disabling compactions & flushes 2024-11-08T02:28:50,787 INFO [RS:2;331c7316141f:39565 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T02:28:50,787 INFO [RS:2;331c7316141f:39565 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-08T02:28:50,787 INFO [RS_CLOSE_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6. 2024-11-08T02:28:50,787 DEBUG [RS_CLOSE_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6. 2024-11-08T02:28:50,787 INFO [RS:2;331c7316141f:39565 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-08T02:28:50,787 DEBUG [RS_CLOSE_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6. after waiting 0 ms 2024-11-08T02:28:50,787 DEBUG [RS_CLOSE_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6. 
2024-11-08T02:28:50,788 INFO [RS:2;331c7316141f:39565 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-08T02:28:50,788 DEBUG [RS:2;331c7316141f:39565 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, da5507206b6d5da39fa7e8e2a50d6ed6=TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6.} 2024-11-08T02:28:50,788 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T02:28:50,788 INFO [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T02:28:50,788 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T02:28:50,788 DEBUG [RS:2;331c7316141f:39565 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, da5507206b6d5da39fa7e8e2a50d6ed6 2024-11-08T02:28:50,788 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T02:28:50,788 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T02:28:50,788 INFO [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-08T02:28:50,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741827_1017 (size=93) 2024-11-08T02:28:50,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41719 is added to blk_1073741828_1018 (size=93) 2024-11-08T02:28:50,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41719 is added to blk_1073741827_1017 (size=93) 2024-11-08T02:28:50,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741828_1018 (size=93) 2024-11-08T02:28:50,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741827_1017 (size=93) 2024-11-08T02:28:50,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741828_1018 (size=93) 2024-11-08T02:28:50,799 DEBUG [RS:0;331c7316141f:42943 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/oldWALs 2024-11-08T02:28:50,799 DEBUG [RS:1;331c7316141f:41241 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/oldWALs 2024-11-08T02:28:50,799 INFO [RS:0;331c7316141f:42943 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 331c7316141f%2C42943%2C1731032926723:(num 1731032928537) 2024-11-08T02:28:50,799 INFO [RS:1;331c7316141f:41241 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 331c7316141f%2C41241%2C1731032926817:(num 1731032928537) 2024-11-08T02:28:50,799 DEBUG [RS:0;331c7316141f:42943 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T02:28:50,799 DEBUG [RS:1;331c7316141f:41241 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T02:28:50,799 INFO [RS:0;331c7316141f:42943 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T02:28:50,799 INFO [RS:1;331c7316141f:41241 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T02:28:50,800 INFO [RS:0;331c7316141f:42943 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T02:28:50,800 INFO [RS:1;331c7316141f:41241 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T02:28:50,800 INFO [RS:0;331c7316141f:42943 {}] hbase.ChoreService(370): Chore service for: regionserver/331c7316141f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T02:28:50,800 INFO [RS:1;331c7316141f:41241 {}] hbase.ChoreService(370): Chore service for: regionserver/331c7316141f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T02:28:50,800 INFO [RS:1;331c7316141f:41241 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T02:28:50,800 INFO [RS:0;331c7316141f:42943 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T02:28:50,800 INFO [regionserver/331c7316141f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-08T02:28:50,800 INFO [RS:1;331c7316141f:41241 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T02:28:50,800 INFO [regionserver/331c7316141f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-08T02:28:50,800 INFO [RS:0;331c7316141f:42943 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T02:28:50,800 INFO [RS:1;331c7316141f:41241 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-08T02:28:50,800 INFO [RS:0;331c7316141f:42943 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-08T02:28:50,800 INFO [RS:0;331c7316141f:42943 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T02:28:50,800 INFO [RS:1;331c7316141f:41241 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T02:28:50,801 INFO [RS:0;331c7316141f:42943 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42943 2024-11-08T02:28:50,801 INFO [RS:1;331c7316141f:41241 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41241 2024-11-08T02:28:50,811 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41241-0x10118463fa90002, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/331c7316141f,41241,1731032926817 2024-11-08T02:28:50,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T02:28:50,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42943-0x10118463fa90001, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/331c7316141f,42943,1731032926723 2024-11-08T02:28:50,811 INFO [RS:0;331c7316141f:42943 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T02:28:50,811 INFO [RS:1;331c7316141f:41241 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T02:28:50,812 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [331c7316141f,42943,1731032926723] 2024-11-08T02:28:50,817 DEBUG [RS_CLOSE_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/default/TestHBaseWalOnEC/da5507206b6d5da39fa7e8e2a50d6ed6/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-08T02:28:50,820 INFO [RS_CLOSE_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6. 2024-11-08T02:28:50,820 DEBUG [RS_CLOSE_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for da5507206b6d5da39fa7e8e2a50d6ed6: Waiting for close lock at 1731032930786Running coprocessor pre-close hooks at 1731032930787 (+1 ms)Disabling compacts and flushes for region at 1731032930787Disabling writes for close at 1731032930787Writing region close event to WAL at 1731032930803 (+16 ms)Running coprocessor post-close hooks at 1731032930819 (+16 ms)Closed at 1731032930820 (+1 ms) 2024-11-08T02:28:50,821 DEBUG [RS_CLOSE_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6. 
2024-11-08T02:28:50,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-08T02:28:50,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-08T02:28:50,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-08T02:28:50,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_-9223372036854775740_1008 (size=1189) 2024-11-08T02:28:50,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41719 is added to blk_-9223372036854775724_1010 (size=34) 2024-11-08T02:28:50,831 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/331c7316141f,42943,1731032926723 already deleted, retry=false 2024-11-08T02:28:50,832 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 331c7316141f,42943,1731032926723 expired; onlineServers=2 2024-11-08T02:28:50,832 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [331c7316141f,41241,1731032926817] 2024-11-08T02:28:50,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-08T02:28:50,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_-9223372036854775725_1010 (size=34) 2024-11-08T02:28:50,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_-9223372036854775741_1008 (size=1189) 2024-11-08T02:28:50,835 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740/.tmp/info/47681028bc2544cd8f1eaf47009f9e7e is 153, key is TestHBaseWalOnEC,,1731032929377.da5507206b6d5da39fa7e8e2a50d6ed6./info:regioninfo/1731032929832/Put/seqid=0 2024-11-08T02:28:50,839 WARN [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:50,839 WARN [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
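The two WARN lines above come from DFSStripedOutputStream: the hbase:meta flush is writing its store file under the RS-3-2-1024k erasure coding policy, which stripes each block group across 3 data blocks plus 2 parity blocks and therefore wants at least 5 datanodes, while this mini-cluster runs only 3 (127.0.0.1:42169, :41719 and :39163 in the addStoredBlock lines). The parity streamers at index 3 and 4 have no node to land on, so the same pair of warnings repeats for every flushed file. The log itself suggests the CLI check 'hdfs ec -verifyClusterSetup'; the snippet below is a minimal programmatic sketch of the same comparison, not part of the test, and it assumes the NameNode at hdfs://localhost:39361 and the test-data path seen in these messages are reachable.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcCapacityCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address and data directory taken from the log above.
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:39361"), conf);
        Path dir = new Path("/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a");

        ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir); // null => plain replication
        DatanodeInfo[] nodes = dfs.getDataNodeStats();                // all datanodes the NameNode reports

        if (policy != null) {
          int needed = policy.getNumDataUnits() + policy.getNumParityUnits(); // RS-3-2 => 5
          System.out.printf("policy=%s needs %d datanodes, cluster has %d%n",
              policy.getName(), needed, nodes.length);
          if (nodes.length < needed) {
            System.out.println("parity blocks cannot all be placed; expect the warnings above");
          }
        }
      }
    }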
2024-11-08T02:28:50,842 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/331c7316141f,41241,1731032926817 already deleted, retry=false
2024-11-08T02:28:50,842 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 331c7316141f,41241,1731032926817 expired; onlineServers=1
2024-11-08T02:28:50,844 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_642982442_22 at /127.0.0.1:41854 [Receiving block BP-309047652-172.17.0.2-1731032921345:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:41719:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41854 dst: /127.0.0.1:41719
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-08T02:28:50,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41719 is added to blk_-9223372036854775632_1027 (size=6637)
2024-11-08T02:28:50,849 WARN [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
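"Block group <1> failed to write 2 blocks. It's at high risk of losing data" is the logical consequence of the allocation failures above: with RS-3-2 a block group stays readable as long as any 3 of its 5 internal blocks survive, but here only the 3 data blocks could be placed, so the group has zero remaining failure tolerance. The DataXceiver "Premature EOF" ERROR appears to be the datanode-side view of the same striped write being closed out; the flush itself still completes, as the addStoredBlock and "Flushed memstore" lines that follow show. A small worked example of the arithmetic, purely illustrative plain Java:

    public class StripeTolerance {
      public static void main(String[] args) {
        int dataUnits = 3;    // RS-3-2-1024k: 3 data cells per stripe
        int parityUnits = 2;  // 2 parity cells per stripe
        int datanodes = 3;    // this mini-cluster (ports 42169, 41719, 39163)

        // Each of the 5 internal blocks of a group wants its own datanode.
        int placed = Math.min(dataUnits + parityUnits, datanodes); // 3
        int missingParity = (dataUnits + parityUnits) - placed;    // 2, matching the two WARNs
        int tolerableFailures = placed - dataUnits;                // 0, hence "high risk"

        System.out.printf("placed=%d missingParity=%d tolerableFailures=%d%n",
            placed, missingParity, tolerableFailures);
      }
    }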
2024-11-08T02:28:50,849 INFO [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740/.tmp/info/47681028bc2544cd8f1eaf47009f9e7e 2024-11-08T02:28:50,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41719 is added to blk_-9223372036854775709_1013 (size=1321) 2024-11-08T02:28:50,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_-9223372036854775708_1013 (size=1321) 2024-11-08T02:28:50,880 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740/.tmp/ns/525864bdf531403595e96264d92e9f07 is 43, key is default/ns:d/1731032929147/Put/seqid=0 2024-11-08T02:28:50,882 WARN [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:50,882 WARN [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:50,882 INFO [regionserver/331c7316141f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T02:28:50,883 INFO [regionserver/331c7316141f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T02:28:50,886 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_642982442_22 at /127.0.0.1:54956 [Receiving block BP-309047652-172.17.0.2-1731032921345:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:39163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54956 dst: /127.0.0.1:39163 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T02:28:50,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_-9223372036854775616_1029 (size=5153) 2024-11-08T02:28:50,890 WARN [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T02:28:50,890 INFO [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740/.tmp/ns/525864bdf531403595e96264d92e9f07 2024-11-08T02:28:50,916 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740/.tmp/table/5043d6607af44ed98be27010a4e18970 is 52, key is TestHBaseWalOnEC/table:state/1731032929849/Put/seqid=0 2024-11-08T02:28:50,918 WARN [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:50,919 WARN [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:50,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42943-0x10118463fa90001, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T02:28:50,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42943-0x10118463fa90001, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T02:28:50,922 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_642982442_22 at /127.0.0.1:41886 [Receiving block BP-309047652-172.17.0.2-1731032921345:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:41719:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41886 dst: /127.0.0.1:41719 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T02:28:50,922 INFO [RS:1;331c7316141f:41241 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T02:28:50,922 INFO [RS:0;331c7316141f:42943 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T02:28:50,922 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41241-0x10118463fa90002, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T02:28:50,922 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41241-0x10118463fa90002, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T02:28:50,922 INFO [RS:1;331c7316141f:41241 {}] regionserver.HRegionServer(1031): Exiting; stopping=331c7316141f,41241,1731032926817; zookeeper connection closed. 2024-11-08T02:28:50,922 INFO [RS:0;331c7316141f:42943 {}] regionserver.HRegionServer(1031): Exiting; stopping=331c7316141f,42943,1731032926723; zookeeper connection closed. 2024-11-08T02:28:50,923 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@34f5898b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@34f5898b 2024-11-08T02:28:50,924 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@755aed5a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@755aed5a 2024-11-08T02:28:50,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41719 is added to blk_-9223372036854775600_1031 (size=5249) 2024-11-08T02:28:50,927 WARN [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
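The same pair of parity warnings shows up once per flushed hbase:meta store file (info, then ns, then table just below) because each new file is created under a directory carrying the RS-3-2-1024k policy; the master's .lastflushedseqids and MasterData files further down trip over it as well. If a test or operator wanted such files to fall back to something the 3-node cluster can satisfy, the policy can be changed per directory. A hedged sketch using the public DistributedFileSystem calls, with the directory path reused from the log; this is not necessarily what TestHBaseWalOnEC itself does, and only files created after the change are affected.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class SwitchEcPolicy {
      public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
            URI.create("hdfs://localhost:39361"), new Configuration());
        Path dataDir = new Path("/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data");

        // Option 1: pick a built-in policy three datanodes can satisfy:
        // XOR-2-1-1024k needs 2 data + 1 parity = 3 nodes. Enable it, then apply it.
        dfs.enableErasureCodingPolicy("XOR-2-1-1024k");
        dfs.setErasureCodingPolicy(dataDir, "XOR-2-1-1024k");

        // Option 2 (alternative): drop erasure coding for this directory entirely,
        // so new files use ordinary replication.
        // dfs.unsetErasureCodingPolicy(dataDir);
      }
    }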
2024-11-08T02:28:50,927 INFO [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740/.tmp/table/5043d6607af44ed98be27010a4e18970 2024-11-08T02:28:50,938 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740/.tmp/info/47681028bc2544cd8f1eaf47009f9e7e as hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740/info/47681028bc2544cd8f1eaf47009f9e7e 2024-11-08T02:28:50,949 INFO [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740/info/47681028bc2544cd8f1eaf47009f9e7e, entries=10, sequenceid=11, filesize=6.5 K 2024-11-08T02:28:50,951 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740/.tmp/ns/525864bdf531403595e96264d92e9f07 as hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740/ns/525864bdf531403595e96264d92e9f07 2024-11-08T02:28:50,960 INFO [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740/ns/525864bdf531403595e96264d92e9f07, entries=2, sequenceid=11, filesize=5.0 K 2024-11-08T02:28:50,962 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740/.tmp/table/5043d6607af44ed98be27010a4e18970 as hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740/table/5043d6607af44ed98be27010a4e18970 2024-11-08T02:28:50,971 INFO [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740/table/5043d6607af44ed98be27010a4e18970, entries=2, sequenceid=11, filesize=5.1 K 2024-11-08T02:28:50,973 INFO [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 184ms, sequenceid=11, compaction requested=false 2024-11-08T02:28:50,973 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-08T02:28:50,981 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, 
maxSeqId=1 2024-11-08T02:28:50,982 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T02:28:50,982 INFO [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T02:28:50,982 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731032930788Running coprocessor pre-close hooks at 1731032930788Disabling compacts and flushes for region at 1731032930788Disabling writes for close at 1731032930788Obtaining lock to block concurrent updates at 1731032930788Preparing flush snapshotting stores in 1588230740 at 1731032930788Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731032930789 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731032930790 (+1 ms)Flushing 1588230740/info: creating writer at 1731032930790Flushing 1588230740/info: appending metadata at 1731032930827 (+37 ms)Flushing 1588230740/info: closing flushed file at 1731032930827Flushing 1588230740/ns: creating writer at 1731032930861 (+34 ms)Flushing 1588230740/ns: appending metadata at 1731032930879 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1731032930879Flushing 1588230740/table: creating writer at 1731032930898 (+19 ms)Flushing 1588230740/table: appending metadata at 1731032930915 (+17 ms)Flushing 1588230740/table: closing flushed file at 1731032930916 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@63af3db0: reopening flushed file at 1731032930937 (+21 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d123490: reopening flushed file at 1731032930949 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d404387: reopening flushed file at 1731032930960 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 184ms, sequenceid=11, compaction requested=false at 1731032930973 (+13 ms)Writing region close event to WAL at 1731032930974 (+1 ms)Running coprocessor post-close hooks at 1731032930982 (+8 ms)Closed at 1731032930982 2024-11-08T02:28:50,982 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-08T02:28:50,988 INFO [RS:2;331c7316141f:39565 {}] regionserver.HRegionServer(976): stopping server 331c7316141f,39565,1731032926868; all regions closed. 
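The "Region close journal" entry above packs the whole close sequence, with per-step timestamps and cumulative "(+N ms)" deltas, into a single line; the journals for the user region earlier and for the master's local store later in this log have the same shape. When chasing a slow close it can help to break such a line apart. The helper below is a hypothetical illustration of doing that with a regex over the visible format ("step description at <epoch millis>", optionally followed by "(+N ms)", fragments concatenated back to back); it is not HBase code.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class CloseJournalSplitter {
      // Matches "<step> at <13-digit epoch millis>" optionally followed by " (+N ms)".
      private static final Pattern STEP =
          Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

      public static void main(String[] args) {
        // Fragments copied from the hbase:meta close journal above.
        String journal = "Waiting for close lock at 1731032930788"
            + "Running coprocessor pre-close hooks at 1731032930788"
            + "Disabling compacts and flushes for region at 1731032930788"
            + "Writing region close event to WAL at 1731032930974 (+1 ms)";

        Matcher m = STEP.matcher(journal);
        long prev = -1;
        while (m.find()) {
          long ts = Long.parseLong(m.group(2));
          long delta = prev < 0 ? 0 : ts - prev;
          System.out.printf("%-50s %d (+%d ms)%n", m.group(1).trim(), ts, delta);
          prev = ts;
        }
      }
    }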
2024-11-08T02:28:50,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41719 is added to blk_1073741829_1019 (size=2751) 2024-11-08T02:28:50,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741829_1019 (size=2751) 2024-11-08T02:28:50,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741829_1019 (size=2751) 2024-11-08T02:28:50,994 DEBUG [RS:2;331c7316141f:39565 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/oldWALs 2024-11-08T02:28:50,994 INFO [RS:2;331c7316141f:39565 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 331c7316141f%2C39565%2C1731032926868.meta:.meta(num 1731032928919) 2024-11-08T02:28:50,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741826_1016 (size=1298) 2024-11-08T02:28:50,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41719 is added to blk_1073741826_1016 (size=1298) 2024-11-08T02:28:50,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741826_1016 (size=1298) 2024-11-08T02:28:51,000 DEBUG [RS:2;331c7316141f:39565 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/oldWALs 2024-11-08T02:28:51,000 INFO [RS:2;331c7316141f:39565 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 331c7316141f%2C39565%2C1731032926868:(num 1731032928537) 2024-11-08T02:28:51,001 DEBUG [RS:2;331c7316141f:39565 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T02:28:51,001 INFO [RS:2;331c7316141f:39565 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T02:28:51,001 INFO [RS:2;331c7316141f:39565 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T02:28:51,001 INFO [RS:2;331c7316141f:39565 {}] hbase.ChoreService(370): Chore service for: regionserver/331c7316141f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T02:28:51,001 INFO [RS:2;331c7316141f:39565 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T02:28:51,001 INFO [regionserver/331c7316141f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
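On clean shutdown each region server archives its write-ahead logs instead of deleting them, which is why all three servers (and the meta WAL just above) report "Moved 1 WAL file(s) to .../oldWALs" before the "Closed WAL" line. Archived WALs sit in that single cluster-wide directory until the master's LogCleaner chore removes them; that cleaner is cancelled a few lines below as part of master shutdown. A small sketch, assuming the same NameNode and path as in the log, of listing what ended up there:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListOldWals {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:39361"), new Configuration());
        Path oldWals = new Path("/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/oldWALs");
        for (FileStatus stat : fs.listStatus(oldWals)) {
          // Expect one archived WAL per region server plus the meta WAL
          // (names derived from the "Closed WAL" lines above).
          System.out.printf("%8d  %s%n", stat.getLen(), stat.getPath().getName());
        }
      }
    }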
2024-11-08T02:28:51,001 INFO [RS:2;331c7316141f:39565 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39565 2024-11-08T02:28:51,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T02:28:51,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/331c7316141f,39565,1731032926868 2024-11-08T02:28:51,010 INFO [RS:2;331c7316141f:39565 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T02:28:51,011 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [331c7316141f,39565,1731032926868] 2024-11-08T02:28:51,032 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/331c7316141f,39565,1731032926868 already deleted, retry=false 2024-11-08T02:28:51,032 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 331c7316141f,39565,1731032926868 expired; onlineServers=0 2024-11-08T02:28:51,032 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '331c7316141f,37595,1731032925968' ***** 2024-11-08T02:28:51,032 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-08T02:28:51,032 INFO [M:0;331c7316141f:37595 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T02:28:51,032 INFO [M:0;331c7316141f:37595 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T02:28:51,032 DEBUG [M:0;331c7316141f:37595 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-08T02:28:51,033 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-08T02:28:51,033 DEBUG [M:0;331c7316141f:37595 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-08T02:28:51,033 DEBUG [master/331c7316141f:0:becomeActiveMaster-HFileCleaner.large.0-1731032928136 {}] cleaner.HFileCleaner(306): Exit Thread[master/331c7316141f:0:becomeActiveMaster-HFileCleaner.large.0-1731032928136,5,FailOnTimeoutGroup] 2024-11-08T02:28:51,033 DEBUG [master/331c7316141f:0:becomeActiveMaster-HFileCleaner.small.0-1731032928137 {}] cleaner.HFileCleaner(306): Exit Thread[master/331c7316141f:0:becomeActiveMaster-HFileCleaner.small.0-1731032928137,5,FailOnTimeoutGroup] 2024-11-08T02:28:51,033 INFO [M:0;331c7316141f:37595 {}] hbase.ChoreService(370): Chore service for: master/331c7316141f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-08T02:28:51,033 INFO [M:0;331c7316141f:37595 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T02:28:51,034 DEBUG [M:0;331c7316141f:37595 {}] master.HMaster(1795): Stopping service threads 2024-11-08T02:28:51,034 INFO [M:0;331c7316141f:37595 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-08T02:28:51,034 INFO [M:0;331c7316141f:37595 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T02:28:51,035 INFO [M:0;331c7316141f:37595 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-08T02:28:51,035 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-08T02:28:51,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-08T02:28:51,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:51,043 DEBUG [M:0;331c7316141f:37595 {}] zookeeper.ZKUtil(347): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-08T02:28:51,043 WARN [M:0;331c7316141f:37595 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-08T02:28:51,044 INFO [M:0;331c7316141f:37595 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/.lastflushedseqids 2024-11-08T02:28:51,059 WARN [M:0;331c7316141f:37595 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:51,060 WARN [M:0;331c7316141f:37595 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-08T02:28:51,062 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1077523476_22 at /127.0.0.1:54980 [Receiving block BP-309047652-172.17.0.2-1731032921345:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:39163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54980 dst: /127.0.0.1:39163 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T02:28:51,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_-9223372036854775584_1033 (size=127) 2024-11-08T02:28:51,066 WARN [M:0;331c7316141f:37595 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T02:28:51,066 INFO [M:0;331c7316141f:37595 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-08T02:28:51,066 INFO [M:0;331c7316141f:37595 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-08T02:28:51,067 DEBUG [M:0;331c7316141f:37595 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T02:28:51,067 INFO [M:0;331c7316141f:37595 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T02:28:51,067 DEBUG [M:0;331c7316141f:37595 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T02:28:51,067 DEBUG [M:0;331c7316141f:37595 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T02:28:51,067 DEBUG [M:0;331c7316141f:37595 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-08T02:28:51,067 INFO [M:0;331c7316141f:37595 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-11-08T02:28:51,083 DEBUG [M:0;331c7316141f:37595 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f12ff43509d44c329807bb21192ea386 is 82, key is hbase:meta,,1/info:regioninfo/1731032929005/Put/seqid=0 2024-11-08T02:28:51,085 WARN [M:0;331c7316141f:37595 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:51,085 WARN [M:0;331c7316141f:37595 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:51,092 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1077523476_22 at /127.0.0.1:41916 [Receiving block BP-309047652-172.17.0.2-1731032921345:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:41719:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41916 dst: /127.0.0.1:41719 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T02:28:51,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41719 is added to blk_-9223372036854775568_1035 (size=5672) 2024-11-08T02:28:51,097 WARN [M:0;331c7316141f:37595 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-08T02:28:51,097 INFO [M:0;331c7316141f:37595 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f12ff43509d44c329807bb21192ea386 2024-11-08T02:28:51,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T02:28:51,121 INFO [RS:2;331c7316141f:39565 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T02:28:51,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39565-0x10118463fa90003, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T02:28:51,121 INFO [RS:2;331c7316141f:39565 {}] regionserver.HRegionServer(1031): Exiting; stopping=331c7316141f,39565,1731032926868; zookeeper connection closed. 2024-11-08T02:28:51,122 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2d590467 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2d590467 2024-11-08T02:28:51,122 DEBUG [M:0;331c7316141f:37595 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a2c716645ff44be8919404172232fcae is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731032929856/Put/seqid=0 2024-11-08T02:28:51,122 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-08T02:28:51,124 WARN [M:0;331c7316141f:37595 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:51,124 WARN [M:0;331c7316141f:37595 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:51,127 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1077523476_22 at /127.0.0.1:41928 [Receiving block BP-309047652-172.17.0.2-1731032921345:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:41719:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41928 dst: /127.0.0.1:41719 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T02:28:51,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41719 is added to blk_-9223372036854775552_1037 (size=6438) 2024-11-08T02:28:51,131 WARN [M:0;331c7316141f:37595 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T02:28:51,131 INFO [M:0;331c7316141f:37595 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.13 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a2c716645ff44be8919404172232fcae 2024-11-08T02:28:51,156 DEBUG [M:0;331c7316141f:37595 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e115c17df4b84acea9f25da6d16034a0 is 69, key is 331c7316141f,39565,1731032926868/rs:state/1731032928266/Put/seqid=0 2024-11-08T02:28:51,158 WARN [M:0;331c7316141f:37595 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:51,159 WARN [M:0;331c7316141f:37595 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T02:28:51,164 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1077523476_22 at /127.0.0.1:41954 [Receiving block BP-309047652-172.17.0.2-1731032921345:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:41719:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41954 dst: /127.0.0.1:41719 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T02:28:51,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41719 is added to blk_-9223372036854775536_1039 (size=5294) 2024-11-08T02:28:51,169 WARN [M:0;331c7316141f:37595 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T02:28:51,169 INFO [M:0;331c7316141f:37595 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e115c17df4b84acea9f25da6d16034a0 2024-11-08T02:28:51,179 DEBUG [M:0;331c7316141f:37595 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f12ff43509d44c329807bb21192ea386 as hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f12ff43509d44c329807bb21192ea386 2024-11-08T02:28:51,187 INFO [M:0;331c7316141f:37595 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f12ff43509d44c329807bb21192ea386, entries=8, sequenceid=72, filesize=5.5 K 2024-11-08T02:28:51,189 DEBUG [M:0;331c7316141f:37595 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a2c716645ff44be8919404172232fcae as hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a2c716645ff44be8919404172232fcae 2024-11-08T02:28:51,197 INFO [M:0;331c7316141f:37595 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a2c716645ff44be8919404172232fcae, entries=8, sequenceid=72, filesize=6.3 K 2024-11-08T02:28:51,199 DEBUG [M:0;331c7316141f:37595 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e115c17df4b84acea9f25da6d16034a0 as hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e115c17df4b84acea9f25da6d16034a0 2024-11-08T02:28:51,208 INFO [M:0;331c7316141f:37595 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e115c17df4b84acea9f25da6d16034a0, entries=3, sequenceid=72, filesize=5.2 K 2024-11-08T02:28:51,209 INFO [M:0;331c7316141f:37595 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 142ms, sequenceid=72, compaction requested=false 2024-11-08T02:28:51,211 INFO [M:0;331c7316141f:37595 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T02:28:51,211 DEBUG [M:0;331c7316141f:37595 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731032931067Disabling compacts and flushes for region at 1731032931067Disabling writes for close at 1731032931067Obtaining lock to block concurrent updates at 1731032931067Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731032931067Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27459, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1731032931067Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731032931068 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731032931068Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731032931083 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731032931083Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731032931105 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731032931121 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731032931121Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731032931139 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731032931156 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731032931156Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3fbea8c: reopening flushed file at 1731032931177 (+21 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7124f23b: reopening flushed file at 1731032931187 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@cb82513: reopening flushed file at 1731032931197 (+10 ms)Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 142ms, sequenceid=72, compaction requested=false at 1731032931209 (+12 ms)Writing region close event to WAL at 1731032931210 (+1 ms)Closed at 1731032931210 2024-11-08T02:28:51,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41719 is added to blk_1073741825_1011 (size=32662) 2024-11-08T02:28:51,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741825_1011 (size=32662) 2024-11-08T02:28:51,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741825_1011 (size=32662) 2024-11-08T02:28:51,214 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-08T02:28:51,215 INFO [M:0;331c7316141f:37595 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-08T02:28:51,215 INFO [M:0;331c7316141f:37595 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37595 2024-11-08T02:28:51,215 INFO [M:0;331c7316141f:37595 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T02:28:51,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T02:28:51,321 INFO [M:0;331c7316141f:37595 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T02:28:51,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37595-0x10118463fa90000, quorum=127.0.0.1:58803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T02:28:51,359 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3114ae69{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T02:28:51,364 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c70a874{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T02:28:51,364 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T02:28:51,364 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5822645a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T02:28:51,364 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16cd567f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/hadoop.log.dir/,STOPPED} 2024-11-08T02:28:51,366 WARN [BP-309047652-172.17.0.2-1731032921345 heartbeating to localhost/127.0.0.1:39361 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T02:28:51,366 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T02:28:51,366 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T02:28:51,366 WARN [BP-309047652-172.17.0.2-1731032921345 heartbeating to localhost/127.0.0.1:39361 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-309047652-172.17.0.2-1731032921345 (Datanode Uuid 34b6d3b3-2205-44a8-87db-43f6551f6d3f) service to localhost/127.0.0.1:39361 2024-11-08T02:28:51,367 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/cluster_bf7ecdbe-5d4f-e873-f842-da2208887abf/data/data5/current/BP-309047652-172.17.0.2-1731032921345 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T02:28:51,368 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/cluster_bf7ecdbe-5d4f-e873-f842-da2208887abf/data/data6/current/BP-309047652-172.17.0.2-1731032921345 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T02:28:51,368 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T02:28:51,371 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@353955e9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T02:28:51,372 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11738cd8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T02:28:51,372 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T02:28:51,372 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40eb7053{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T02:28:51,372 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@510fec09{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/hadoop.log.dir/,STOPPED} 2024-11-08T02:28:51,373 WARN [BP-309047652-172.17.0.2-1731032921345 heartbeating to localhost/127.0.0.1:39361 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T02:28:51,373 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T02:28:51,373 WARN [BP-309047652-172.17.0.2-1731032921345 heartbeating to localhost/127.0.0.1:39361 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-309047652-172.17.0.2-1731032921345 (Datanode Uuid 6e393b64-2798-4512-9c6d-48dace9da3b8) service to localhost/127.0.0.1:39361 2024-11-08T02:28:51,373 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T02:28:51,374 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/cluster_bf7ecdbe-5d4f-e873-f842-da2208887abf/data/data3/current/BP-309047652-172.17.0.2-1731032921345 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T02:28:51,374 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/cluster_bf7ecdbe-5d4f-e873-f842-da2208887abf/data/data4/current/BP-309047652-172.17.0.2-1731032921345 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T02:28:51,374 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T02:28:51,377 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1b97a472{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T02:28:51,377 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3722a29b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T02:28:51,377 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T02:28:51,377 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69893329{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T02:28:51,377 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a5de9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/hadoop.log.dir/,STOPPED} 2024-11-08T02:28:51,379 WARN [BP-309047652-172.17.0.2-1731032921345 heartbeating to localhost/127.0.0.1:39361 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T02:28:51,379 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
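[Editor's note] Each datanode also tears down its embedded Jetty web UI, which is what the paired "Started"/"Stopped" ServerConnector and ContextHandler entries throughout this log correspond to. A small standalone Jetty 9 sketch of that lifecycle, using only stock Jetty API; the handler wiring is illustrative and is not Hadoop's HttpServer2 setup:

// Minimal embedded Jetty 9 lifecycle: start() yields "Started ServerConnector..."
// entries and stop() yields the matching "Stopped ..." entries seen above.
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.servlet.ServletContextHandler;

public class EmbeddedJettySketch {
  public static void main(String[] args) throws Exception {
    Server server = new Server(0);                 // 0 = pick a free port, like "localhost:0"
    ServletContextHandler context = new ServletContextHandler();
    context.setContextPath("/static");
    server.setHandler(context);
    server.start();                                // logs "Started ServerConnector@..."
    try {
      // ... serve requests ...
    } finally {
      server.stop();                               // logs "Stopped ServerConnector@..."
    }
  }
}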
2024-11-08T02:28:51,379 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T02:28:51,379 WARN [BP-309047652-172.17.0.2-1731032921345 heartbeating to localhost/127.0.0.1:39361 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-309047652-172.17.0.2-1731032921345 (Datanode Uuid 676964a8-a1d3-4b61-a58b-b9d6a169f768) service to localhost/127.0.0.1:39361 2024-11-08T02:28:51,379 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/cluster_bf7ecdbe-5d4f-e873-f842-da2208887abf/data/data1/current/BP-309047652-172.17.0.2-1731032921345 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T02:28:51,380 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/cluster_bf7ecdbe-5d4f-e873-f842-da2208887abf/data/data2/current/BP-309047652-172.17.0.2-1731032921345 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T02:28:51,380 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T02:28:51,387 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62d6efd9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T02:28:51,387 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@353d35a1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T02:28:51,387 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T02:28:51,388 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ce709a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T02:28:51,388 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@760c69c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/hadoop.log.dir/,STOPPED} 2024-11-08T02:28:51,395 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-08T02:28:51,424 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-08T02:28:51,430 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=88 (was 161), OpenFileDescriptor=439 (was 393) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=399 (was 390) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=7305 (was 7602) 2024-11-08T02:28:51,436 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=88, OpenFileDescriptor=439, MaxFileDescriptor=1048576, SystemLoadAverage=399, ProcessCount=11, AvailableMemoryMB=7305 2024-11-08T02:28:51,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-08T02:28:51,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/hadoop.log.dir so I do NOT create it in target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2 2024-11-08T02:28:51,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/56ee8b8b-673d-a0b0-1c04-25b21e7cd783/hadoop.tmp.dir so I do NOT create it in target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2 2024-11-08T02:28:51,436 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/cluster_5a95b6b1-a8aa-b3fe-044b-1dafdc18eb2c, deleteOnExit=true 2024-11-08T02:28:51,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-08T02:28:51,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/test.cache.data in system properties and HBase conf 2024-11-08T02:28:51,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/hadoop.tmp.dir in system properties and HBase conf 2024-11-08T02:28:51,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/hadoop.log.dir in system properties and HBase conf 2024-11-08T02:28:51,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-08T02:28:51,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-08T02:28:51,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-08T02:28:51,437 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-08T02:28:51,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-08T02:28:51,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-08T02:28:51,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-08T02:28:51,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T02:28:51,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-08T02:28:51,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-08T02:28:51,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T02:28:51,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T02:28:51,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-08T02:28:51,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/nfs.dump.dir in system properties and HBase conf 2024-11-08T02:28:51,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/java.io.tmpdir in system properties and HBase conf 2024-11-08T02:28:51,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T02:28:51,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-08T02:28:51,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-08T02:28:51,923 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T02:28:51,929 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T02:28:51,930 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T02:28:51,930 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T02:28:51,930 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T02:28:51,931 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T02:28:51,931 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4072566{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/hadoop.log.dir/,AVAILABLE} 2024-11-08T02:28:51,932 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1aa34083{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T02:28:52,026 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4f0384cb{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/java.io.tmpdir/jetty-localhost-42223-hadoop-hdfs-3_4_1-tests_jar-_-any-10907759981054936032/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T02:28:52,027 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7cb6ff0d{HTTP/1.1, (http/1.1)}{localhost:42223} 2024-11-08T02:28:52,027 INFO [Time-limited test {}] server.Server(415): Started @12489ms 2024-11-08T02:28:52,336 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T02:28:52,340 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T02:28:52,341 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T02:28:52,341 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T02:28:52,341 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T02:28:52,341 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68e19264{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/hadoop.log.dir/,AVAILABLE} 2024-11-08T02:28:52,342 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6decf963{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T02:28:52,436 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5cdad191{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/java.io.tmpdir/jetty-localhost-39809-hadoop-hdfs-3_4_1-tests_jar-_-any-12887175882478380342/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T02:28:52,437 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75356c63{HTTP/1.1, (http/1.1)}{localhost:39809} 2024-11-08T02:28:52,437 INFO [Time-limited test {}] server.Server(415): Started @12899ms 2024-11-08T02:28:52,438 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T02:28:52,466 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T02:28:52,470 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T02:28:52,471 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T02:28:52,471 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T02:28:52,471 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T02:28:52,471 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72785dee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/hadoop.log.dir/,AVAILABLE} 2024-11-08T02:28:52,472 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7febc9c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T02:28:52,565 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@375e6dfb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/java.io.tmpdir/jetty-localhost-46491-hadoop-hdfs-3_4_1-tests_jar-_-any-17022085859446745427/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T02:28:52,565 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@56f13ac4{HTTP/1.1, (http/1.1)}{localhost:46491} 2024-11-08T02:28:52,565 INFO [Time-limited test {}] server.Server(415): Started @13027ms 2024-11-08T02:28:52,567 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T02:28:52,593 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T02:28:52,596 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T02:28:52,597 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T02:28:52,597 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T02:28:52,597 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T02:28:52,597 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3c97cc8b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/hadoop.log.dir/,AVAILABLE} 2024-11-08T02:28:52,598 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39179133{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T02:28:52,691 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@59c9153e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/java.io.tmpdir/jetty-localhost-33757-hadoop-hdfs-3_4_1-tests_jar-_-any-8956858835668642915/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T02:28:52,692 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@40ee720f{HTTP/1.1, (http/1.1)}{localhost:33757} 2024-11-08T02:28:52,692 INFO [Time-limited test {}] server.Server(415): Started @13154ms 2024-11-08T02:28:52,693 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T02:28:53,598 WARN [Thread-562 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/cluster_5a95b6b1-a8aa-b3fe-044b-1dafdc18eb2c/data/data1/current/BP-2060157852-172.17.0.2-1731032931463/current, will proceed with Du for space computation calculation, 2024-11-08T02:28:53,598 WARN [Thread-563 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/cluster_5a95b6b1-a8aa-b3fe-044b-1dafdc18eb2c/data/data2/current/BP-2060157852-172.17.0.2-1731032931463/current, will proceed with Du for space computation calculation, 2024-11-08T02:28:53,614 WARN [Thread-503 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T02:28:53,617 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeba4a42f12e2d411 with lease ID 0x794272d2123ad2e6: Processing first storage report for DS-62a10a54-74ad-47f4-b09b-f9f55c65d32c from datanode DatanodeRegistration(127.0.0.1:38033, datanodeUuid=0a40e784-03e5-488b-bc9c-436afa2a3f34, infoPort=45271, infoSecurePort=0, ipcPort=37219, storageInfo=lv=-57;cid=testClusterID;nsid=1039181992;c=1731032931463) 2024-11-08T02:28:53,617 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeba4a42f12e2d411 with lease ID 0x794272d2123ad2e6: from storage DS-62a10a54-74ad-47f4-b09b-f9f55c65d32c node DatanodeRegistration(127.0.0.1:38033, datanodeUuid=0a40e784-03e5-488b-bc9c-436afa2a3f34, infoPort=45271, infoSecurePort=0, ipcPort=37219, storageInfo=lv=-57;cid=testClusterID;nsid=1039181992;c=1731032931463), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T02:28:53,617 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeba4a42f12e2d411 with lease ID 0x794272d2123ad2e6: Processing first storage report for DS-0c3e9bf5-1d7f-48c2-8692-80bd234e944a from datanode DatanodeRegistration(127.0.0.1:38033, datanodeUuid=0a40e784-03e5-488b-bc9c-436afa2a3f34, infoPort=45271, infoSecurePort=0, ipcPort=37219, storageInfo=lv=-57;cid=testClusterID;nsid=1039181992;c=1731032931463) 2024-11-08T02:28:53,617 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeba4a42f12e2d411 with lease ID 0x794272d2123ad2e6: from storage DS-0c3e9bf5-1d7f-48c2-8692-80bd234e944a node DatanodeRegistration(127.0.0.1:38033, datanodeUuid=0a40e784-03e5-488b-bc9c-436afa2a3f34, infoPort=45271, infoSecurePort=0, ipcPort=37219, storageInfo=lv=-57;cid=testClusterID;nsid=1039181992;c=1731032931463), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T02:28:53,971 WARN [Thread-574 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/cluster_5a95b6b1-a8aa-b3fe-044b-1dafdc18eb2c/data/data3/current/BP-2060157852-172.17.0.2-1731032931463/current, will proceed with Du for space computation calculation, 2024-11-08T02:28:53,971 WARN [Thread-575 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/cluster_5a95b6b1-a8aa-b3fe-044b-1dafdc18eb2c/data/data4/current/BP-2060157852-172.17.0.2-1731032931463/current, will proceed with Du for space computation calculation, 2024-11-08T02:28:53,989 WARN [Thread-526 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T02:28:53,991 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x446ebe7b6b5e791b with lease ID 0x794272d2123ad2e7: Processing first storage report for DS-416bf3dd-9835-42dc-898a-28c8a936fa3b from datanode DatanodeRegistration(127.0.0.1:34993, datanodeUuid=653269ca-a5b9-47cd-9561-62727aa24ffe, infoPort=36097, infoSecurePort=0, ipcPort=33507, storageInfo=lv=-57;cid=testClusterID;nsid=1039181992;c=1731032931463) 2024-11-08T02:28:53,992 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x446ebe7b6b5e791b with lease ID 0x794272d2123ad2e7: from storage DS-416bf3dd-9835-42dc-898a-28c8a936fa3b node DatanodeRegistration(127.0.0.1:34993, datanodeUuid=653269ca-a5b9-47cd-9561-62727aa24ffe, infoPort=36097, infoSecurePort=0, ipcPort=33507, storageInfo=lv=-57;cid=testClusterID;nsid=1039181992;c=1731032931463), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T02:28:53,992 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x446ebe7b6b5e791b with lease ID 0x794272d2123ad2e7: Processing first storage report for DS-e6691b7b-e815-42dc-9b53-04df113d1494 from datanode DatanodeRegistration(127.0.0.1:34993, datanodeUuid=653269ca-a5b9-47cd-9561-62727aa24ffe, infoPort=36097, infoSecurePort=0, ipcPort=33507, storageInfo=lv=-57;cid=testClusterID;nsid=1039181992;c=1731032931463) 2024-11-08T02:28:53,992 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x446ebe7b6b5e791b with lease ID 0x794272d2123ad2e7: from storage DS-e6691b7b-e815-42dc-9b53-04df113d1494 node DatanodeRegistration(127.0.0.1:34993, datanodeUuid=653269ca-a5b9-47cd-9561-62727aa24ffe, infoPort=36097, infoSecurePort=0, ipcPort=33507, storageInfo=lv=-57;cid=testClusterID;nsid=1039181992;c=1731032931463), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T02:28:54,089 WARN [Thread-585 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/cluster_5a95b6b1-a8aa-b3fe-044b-1dafdc18eb2c/data/data5/current/BP-2060157852-172.17.0.2-1731032931463/current, will proceed with Du for space computation calculation, 2024-11-08T02:28:54,090 WARN [Thread-586 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/cluster_5a95b6b1-a8aa-b3fe-044b-1dafdc18eb2c/data/data6/current/BP-2060157852-172.17.0.2-1731032931463/current, will proceed with Du for space computation calculation, 2024-11-08T02:28:54,112 WARN [Thread-548 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T02:28:54,115 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xca83aa7078b7ceab with lease ID 0x794272d2123ad2e8: Processing first storage report for DS-dc16f89b-b734-47e4-9361-cbfe25a7568b from datanode DatanodeRegistration(127.0.0.1:35469, datanodeUuid=1248b51b-90d2-49f9-96cc-e8d5e5161b6f, infoPort=45471, infoSecurePort=0, ipcPort=41137, storageInfo=lv=-57;cid=testClusterID;nsid=1039181992;c=1731032931463) 2024-11-08T02:28:54,115 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xca83aa7078b7ceab with lease ID 0x794272d2123ad2e8: from storage DS-dc16f89b-b734-47e4-9361-cbfe25a7568b node DatanodeRegistration(127.0.0.1:35469, datanodeUuid=1248b51b-90d2-49f9-96cc-e8d5e5161b6f, infoPort=45471, infoSecurePort=0, ipcPort=41137, storageInfo=lv=-57;cid=testClusterID;nsid=1039181992;c=1731032931463), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-08T02:28:54,115 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xca83aa7078b7ceab with lease ID 0x794272d2123ad2e8: Processing first storage report for DS-1728cb8e-dab3-4ed3-adfc-219804c03e61 from datanode DatanodeRegistration(127.0.0.1:35469, datanodeUuid=1248b51b-90d2-49f9-96cc-e8d5e5161b6f, infoPort=45471, infoSecurePort=0, ipcPort=41137, storageInfo=lv=-57;cid=testClusterID;nsid=1039181992;c=1731032931463) 2024-11-08T02:28:54,115 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xca83aa7078b7ceab with lease ID 0x794272d2123ad2e8: from storage DS-1728cb8e-dab3-4ed3-adfc-219804c03e61 node DatanodeRegistration(127.0.0.1:35469, datanodeUuid=1248b51b-90d2-49f9-96cc-e8d5e5161b6f, infoPort=45471, infoSecurePort=0, ipcPort=41137, storageInfo=lv=-57;cid=testClusterID;nsid=1039181992;c=1731032931463), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T02:28:54,137 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2 2024-11-08T02:28:54,141 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/cluster_5a95b6b1-a8aa-b3fe-044b-1dafdc18eb2c/zookeeper_0, clientPort=56230, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/cluster_5a95b6b1-a8aa-b3fe-044b-1dafdc18eb2c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/cluster_5a95b6b1-a8aa-b3fe-044b-1dafdc18eb2c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-08T02:28:54,142 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56230 2024-11-08T02:28:54,142 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T02:28:54,144 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T02:28:54,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741825_1001 (size=7) 2024-11-08T02:28:54,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741825_1001 (size=7) 2024-11-08T02:28:54,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38033 is added to blk_1073741825_1001 (size=7) 2024-11-08T02:28:54,161 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba with version=8 2024-11-08T02:28:54,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39361/user/jenkins/test-data/7c7dfa99-f512-9a69-b5e4-5c147d8d6c7a/hbase-staging 2024-11-08T02:28:54,164 INFO [Time-limited test {}] client.ConnectionUtils(128): master/331c7316141f:0 server-side Connection retries=45 2024-11-08T02:28:54,164 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T02:28:54,164 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T02:28:54,164 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T02:28:54,164 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T02:28:54,164 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T02:28:54,164 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-08T02:28:54,164 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T02:28:54,165 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41661 2024-11-08T02:28:54,167 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41661 connecting to ZooKeeper ensemble=127.0.0.1:56230 2024-11-08T02:28:54,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:416610x0, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T02:28:54,225 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41661-0x101184662a70000 connected 2024-11-08T02:28:54,306 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do 
block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T02:28:54,308 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T02:28:54,311 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T02:28:54,312 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba, hbase.cluster.distributed=false 2024-11-08T02:28:54,315 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T02:28:54,316 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41661 2024-11-08T02:28:54,316 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41661 2024-11-08T02:28:54,317 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41661 2024-11-08T02:28:54,317 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41661 2024-11-08T02:28:54,318 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41661 2024-11-08T02:28:54,337 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/331c7316141f:0 server-side Connection retries=45 2024-11-08T02:28:54,337 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T02:28:54,337 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T02:28:54,338 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T02:28:54,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T02:28:54,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T02:28:54,338 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T02:28:54,338 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T02:28:54,338 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43669 2024-11-08T02:28:54,340 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43669 connecting to ZooKeeper ensemble=127.0.0.1:56230 2024-11-08T02:28:54,341 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T02:28:54,342 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T02:28:54,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:436690x0, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T02:28:54,357 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43669-0x101184662a70001 connected 2024-11-08T02:28:54,357 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T02:28:54,358 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T02:28:54,358 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T02:28:54,359 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T02:28:54,360 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T02:28:54,360 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43669 2024-11-08T02:28:54,361 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43669 2024-11-08T02:28:54,361 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43669 2024-11-08T02:28:54,361 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43669 2024-11-08T02:28:54,362 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43669 2024-11-08T02:28:54,380 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/331c7316141f:0 server-side Connection retries=45 2024-11-08T02:28:54,380 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T02:28:54,380 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T02:28:54,380 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T02:28:54,380 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T02:28:54,381 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T02:28:54,381 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T02:28:54,381 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T02:28:54,381 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37539 2024-11-08T02:28:54,383 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37539 connecting to ZooKeeper ensemble=127.0.0.1:56230 2024-11-08T02:28:54,384 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T02:28:54,385 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T02:28:54,400 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:375390x0, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T02:28:54,400 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37539-0x101184662a70002 connected 2024-11-08T02:28:54,400 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T02:28:54,401 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T02:28:54,401 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T02:28:54,402 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T02:28:54,403 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T02:28:54,403 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37539 2024-11-08T02:28:54,404 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37539 2024-11-08T02:28:54,404 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37539 2024-11-08T02:28:54,407 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37539 2024-11-08T02:28:54,407 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37539 2024-11-08T02:28:54,422 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/331c7316141f:0 server-side Connection retries=45 2024-11-08T02:28:54,422 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T02:28:54,422 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T02:28:54,422 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T02:28:54,422 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T02:28:54,422 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T02:28:54,422 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T02:28:54,422 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T02:28:54,423 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41107 2024-11-08T02:28:54,424 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41107 connecting to ZooKeeper ensemble=127.0.0.1:56230 2024-11-08T02:28:54,425 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T02:28:54,426 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T02:28:54,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:411070x0, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T02:28:54,441 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:411070x0, quorum=127.0.0.1:56230, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T02:28:54,441 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41107-0x101184662a70003 connected 2024-11-08T02:28:54,442 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T02:28:54,442 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T02:28:54,443 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41107-0x101184662a70003, quorum=127.0.0.1:56230, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T02:28:54,444 DEBUG [Time-limited 
test {}] zookeeper.ZKUtil(113): regionserver:41107-0x101184662a70003, quorum=127.0.0.1:56230, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T02:28:54,444 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41107 2024-11-08T02:28:54,445 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41107 2024-11-08T02:28:54,445 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41107 2024-11-08T02:28:54,446 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41107 2024-11-08T02:28:54,446 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41107 2024-11-08T02:28:54,459 DEBUG [M:0;331c7316141f:41661 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;331c7316141f:41661 2024-11-08T02:28:54,459 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/331c7316141f,41661,1731032934163 2024-11-08T02:28:54,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41107-0x101184662a70003, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T02:28:54,463 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T02:28:54,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T02:28:54,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T02:28:54,463 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/331c7316141f,41661,1731032934163 2024-11-08T02:28:54,470 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-08T02:28:54,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41107-0x101184662a70003, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T02:28:54,473 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T02:28:54,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:54,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T02:28:54,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41107-0x101184662a70003, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:54,474 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:54,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:54,475 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-08T02:28:54,475 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/331c7316141f,41661,1731032934163 from backup master directory 2024-11-08T02:28:54,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/331c7316141f,41661,1731032934163 2024-11-08T02:28:54,484 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T02:28:54,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T02:28:54,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41107-0x101184662a70003, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T02:28:54,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T02:28:54,484 WARN [master/331c7316141f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
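[Editor's note] The burst of ZKWatcher entries above is the active-master election: the master creates /hbase/master, deletes its own znode under /hbase/backup-masters, and every session that registered a watch receives the corresponding NodeCreated/NodeDeleted/NodeChildrenChanged events. A bare-bones sketch of that watch-and-notify pattern with the plain ZooKeeper client API, reusing paths and the ensemble port from this log for illustration; this is not HBase's ZKWatcher/ZKUtil implementation, which adds retries and recovery on top:

// Plain ZooKeeper client sketch: set a watch on a znode that may not exist yet,
// then receive the NodeCreated/NodeDeleted/NodeChildrenChanged events when
// another process changes it. Illustrative only.
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class MasterZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", state=" + event.getState() + ", path=" + event.getPath());

    ZooKeeper zk = new ZooKeeper("127.0.0.1:56230", 30_000, watcher);

    // exists() with watch=true works even if /hbase/master has not been created yet,
    // mirroring "Set watcher on znode that does not yet exist, /hbase/master".
    zk.exists("/hbase/master", true);
    zk.getChildren("/hbase/backup-masters", true);   // watch for NodeChildrenChanged

    Thread.sleep(60_000);                            // keep the session alive to observe events
    zk.close();
  }
}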
2024-11-08T02:28:54,484 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=331c7316141f,41661,1731032934163 2024-11-08T02:28:54,490 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/hbase.id] with ID: b1a47779-26b5-4fcb-addc-b9a2ffd7b32e 2024-11-08T02:28:54,491 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/.tmp/hbase.id 2024-11-08T02:28:54,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38033 is added to blk_1073741826_1002 (size=42) 2024-11-08T02:28:54,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741826_1002 (size=42) 2024-11-08T02:28:54,505 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/.tmp/hbase.id]:[hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/hbase.id] 2024-11-08T02:28:54,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741826_1002 (size=42) 2024-11-08T02:28:54,522 INFO [master/331c7316141f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T02:28:54,523 INFO [master/331c7316141f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-08T02:28:54,525 INFO [master/331c7316141f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 2024-11-08T02:28:54,537 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:54,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:54,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41107-0x101184662a70003, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:54,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:54,537 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T02:28:54,538 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T02:28:54,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38033 is added to blk_1073741827_1003 (size=196) 2024-11-08T02:28:54,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741827_1003 (size=196) 2024-11-08T02:28:54,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741827_1003 (size=196) 2024-11-08T02:28:54,553 INFO [master/331c7316141f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T02:28:54,554 INFO [master/331c7316141f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-08T02:28:54,556 INFO [master/331c7316141f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T02:28:54,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741828_1004 (size=1189) 2024-11-08T02:28:54,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741828_1004 (size=1189) 2024-11-08T02:28:54,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38033 is added to blk_1073741828_1004 (size=1189) 2024-11-08T02:28:54,571 INFO [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => 
'0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/data/master/store 2024-11-08T02:28:54,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38033 is added to blk_1073741829_1005 (size=34) 2024-11-08T02:28:54,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741829_1005 (size=34) 2024-11-08T02:28:54,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741829_1005 (size=34) 2024-11-08T02:28:54,581 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T02:28:54,582 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T02:28:54,582 INFO [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T02:28:54,582 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T02:28:54,582 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T02:28:54,582 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T02:28:54,582 INFO [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
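The long 'master:store' descriptor printed above is simply a table descriptor with four column families (info, proc, rs, state) and per-family attributes. The sketch below builds an equivalent descriptor with the public client API, with the 'info' and 'proc' values copied from the log; it is purely illustrative, since the master creates its local store region internally and no client call is involved.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative equivalent of the 'info' and 'proc' families shown in the log.
    public class MasterStoreDescriptorSketch {
      static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(65536)
                .build())
            .build();
      }
    }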
2024-11-08T02:28:54,582 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731032934582Disabling compacts and flushes for region at 1731032934582Disabling writes for close at 1731032934582Writing region close event to WAL at 1731032934582Closed at 1731032934582 2024-11-08T02:28:54,583 WARN [master/331c7316141f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/data/master/store/.initializing 2024-11-08T02:28:54,583 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/WALs/331c7316141f,41661,1731032934163 2024-11-08T02:28:54,587 INFO [master/331c7316141f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=331c7316141f%2C41661%2C1731032934163, suffix=, logDir=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/WALs/331c7316141f,41661,1731032934163, archiveDir=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/oldWALs, maxLogs=10 2024-11-08T02:28:54,588 INFO [master/331c7316141f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 331c7316141f%2C41661%2C1731032934163.1731032934587 2024-11-08T02:28:54,598 INFO [master/331c7316141f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/WALs/331c7316141f,41661,1731032934163/331c7316141f%2C41661%2C1731032934163.1731032934587 2024-11-08T02:28:54,600 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45271:45271),(127.0.0.1/127.0.0.1:36097:36097),(127.0.0.1/127.0.0.1:45471:45471)] 2024-11-08T02:28:54,607 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-08T02:28:54,608 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T02:28:54,608 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:54,608 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:54,610 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:54,612 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 
1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-08T02:28:54,612 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:54,613 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T02:28:54,613 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:54,615 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-08T02:28:54,615 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:54,616 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T02:28:54,616 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:54,619 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered 
compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-08T02:28:54,619 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:54,620 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T02:28:54,620 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:54,622 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-08T02:28:54,622 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:54,623 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T02:28:54,623 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:54,624 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:54,624 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:54,626 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:54,626 DEBUG 
[master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:54,626 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T02:28:54,627 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T02:28:54,633 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T02:28:54,634 INFO [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68996835, jitterRate=0.028132960200309753}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T02:28:54,634 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731032934608Initializing all the Stores at 1731032934609 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731032934609Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731032934610 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731032934610Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731032934610Cleaning up temporary data from old regions at 1731032934626 (+16 ms)Region opened successfully at 1731032934634 (+8 ms) 2024-11-08T02:28:54,635 INFO [master/331c7316141f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-08T02:28:54,639 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79f14eff, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=331c7316141f/172.17.0.2:0 2024-11-08T02:28:54,640 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-08T02:28:54,640 INFO [master/331c7316141f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-08T02:28:54,640 INFO [master/331c7316141f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-08T02:28:54,641 INFO [master/331c7316141f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-08T02:28:54,641 INFO [master/331c7316141f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-08T02:28:54,642 INFO [master/331c7316141f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-08T02:28:54,642 INFO [master/331c7316141f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-08T02:28:54,645 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-08T02:28:54,646 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-08T02:28:54,652 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-08T02:28:54,653 INFO [master/331c7316141f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-08T02:28:54,653 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-08T02:28:54,663 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-08T02:28:54,663 INFO [master/331c7316141f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-08T02:28:54,664 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-08T02:28:54,673 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-08T02:28:54,675 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily 
an error) 2024-11-08T02:28:54,684 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-08T02:28:54,686 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-08T02:28:54,694 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-08T02:28:54,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T02:28:54,705 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T02:28:54,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T02:28:54,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41107-0x101184662a70003, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T02:28:54,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:54,705 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:54,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:54,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41107-0x101184662a70003, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:54,706 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=331c7316141f,41661,1731032934163, sessionid=0x101184662a70000, setting cluster-up flag (Was=false) 2024-11-08T02:28:54,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:54,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:54,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41107-0x101184662a70003, quorum=127.0.0.1:56230, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:54,726 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:54,758 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-08T02:28:54,763 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=331c7316141f,41661,1731032934163 2024-11-08T02:28:54,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:54,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41107-0x101184662a70003, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:54,789 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:54,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:54,821 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-08T02:28:54,822 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=331c7316141f,41661,1731032934163 2024-11-08T02:28:54,824 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-08T02:28:54,826 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-08T02:28:54,826 INFO [master/331c7316141f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-08T02:28:54,827 INFO [master/331c7316141f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
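The StochasticLoadBalancer line above echoes its tuning knobs (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000). A sketch of setting them programmatically follows; the property names are recalled from the StochasticLoadBalancer source and should be verified against the HBase release in use, so treat them as assumptions rather than documented keys.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: mirror the balancer settings echoed in the log. Key names are
    // assumptions recalled from StochasticLoadBalancer; verify before use.
    public class BalancerTuningSketch {
      static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        return conf;
      }
    }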
2024-11-08T02:28:54,827 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 331c7316141f,41661,1731032934163 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-08T02:28:54,828 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/331c7316141f:0, corePoolSize=5, maxPoolSize=5 2024-11-08T02:28:54,828 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/331c7316141f:0, corePoolSize=5, maxPoolSize=5 2024-11-08T02:28:54,829 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/331c7316141f:0, corePoolSize=5, maxPoolSize=5 2024-11-08T02:28:54,829 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/331c7316141f:0, corePoolSize=5, maxPoolSize=5 2024-11-08T02:28:54,829 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/331c7316141f:0, corePoolSize=10, maxPoolSize=10 2024-11-08T02:28:54,829 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:54,829 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/331c7316141f:0, corePoolSize=2, maxPoolSize=2 2024-11-08T02:28:54,829 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:54,830 INFO [master/331c7316141f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731032964830 2024-11-08T02:28:54,830 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-08T02:28:54,830 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-08T02:28:54,830 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-08T02:28:54,830 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-08T02:28:54,830 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-08T02:28:54,830 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-08T02:28:54,830 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:54,831 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T02:28:54,832 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-08T02:28:54,833 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:54,833 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-08T02:28:54,835 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-08T02:28:54,835 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-08T02:28:54,835 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-08T02:28:54,837 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-08T02:28:54,837 INFO [master/331c7316141f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-08T02:28:54,838 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/331c7316141f:0:becomeActiveMaster-HFileCleaner.large.0-1731032934838,5,FailOnTimeoutGroup] 2024-11-08T02:28:54,838 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/331c7316141f:0:becomeActiveMaster-HFileCleaner.small.0-1731032934838,5,FailOnTimeoutGroup] 2024-11-08T02:28:54,838 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:54,838 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-08T02:28:54,839 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:54,839 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:54,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741831_1007 (size=1321) 2024-11-08T02:28:54,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38033 is added to blk_1073741831_1007 (size=1321) 2024-11-08T02:28:54,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741831_1007 (size=1321) 2024-11-08T02:28:54,846 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-08T02:28:54,846 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba 2024-11-08T02:28:54,850 INFO [RS:0;331c7316141f:43669 
{}] regionserver.HRegionServer(746): ClusterId : b1a47779-26b5-4fcb-addc-b9a2ffd7b32e 2024-11-08T02:28:54,850 DEBUG [RS:0;331c7316141f:43669 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T02:28:54,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741832_1008 (size=32) 2024-11-08T02:28:54,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38033 is added to blk_1073741832_1008 (size=32) 2024-11-08T02:28:54,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741832_1008 (size=32) 2024-11-08T02:28:54,860 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T02:28:54,862 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T02:28:54,863 INFO [RS:1;331c7316141f:37539 {}] regionserver.HRegionServer(746): ClusterId : b1a47779-26b5-4fcb-addc-b9a2ffd7b32e 2024-11-08T02:28:54,863 INFO [RS:2;331c7316141f:41107 {}] regionserver.HRegionServer(746): ClusterId : b1a47779-26b5-4fcb-addc-b9a2ffd7b32e 2024-11-08T02:28:54,863 DEBUG [RS:2;331c7316141f:41107 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T02:28:54,863 DEBUG [RS:1;331c7316141f:37539 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T02:28:54,863 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T02:28:54,863 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:54,864 DEBUG [RS:0;331c7316141f:43669 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T02:28:54,864 DEBUG [RS:0;331c7316141f:43669 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T02:28:54,864 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T02:28:54,864 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T02:28:54,865 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T02:28:54,865 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:54,866 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T02:28:54,866 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T02:28:54,867 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T02:28:54,867 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:54,868 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T02:28:54,868 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T02:28:54,870 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); 
ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T02:28:54,870 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:54,871 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T02:28:54,871 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T02:28:54,872 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740 2024-11-08T02:28:54,873 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740 2024-11-08T02:28:54,875 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T02:28:54,875 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T02:28:54,876 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
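The "32.0 M" fallback in the FlushLargeStoresPolicy lines is just the region memstore flush size divided by the number of column families: 134217728 / 4 = 33554432 bytes, matching the flushSizeLowerBound=33554432 printed when these regions open. The bound can instead be pinned in the table descriptor through the key named in the log; a sketch follows, where the table name and the 64 MB value are hypothetical examples.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Sketch: set the per-column-family flush lower bound in a table descriptor.
    // The key is the one quoted in the log; table name and value are examples.
    public class FlushLowerBoundSketch {
      static TableDescriptor withExplicitBound() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_table"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                      String.valueOf(64L * 1024 * 1024)) // 64 MB
            .build();
      }
    }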
2024-11-08T02:28:54,877 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T02:28:54,880 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T02:28:54,881 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59277902, jitterRate=-0.11669042706489563}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T02:28:54,881 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731032934861Initializing all the Stores at 1731032934861Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731032934861Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731032934862 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731032934862Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731032934862Cleaning up temporary data from old regions at 1731032934875 (+13 ms)Region opened successfully at 1731032934881 (+6 ms) 2024-11-08T02:28:54,881 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T02:28:54,882 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T02:28:54,882 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T02:28:54,882 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T02:28:54,882 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T02:28:54,882 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T02:28:54,882 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731032934881Disabling compacts and flushes for region at 1731032934881Disabling writes for close at 1731032934882 (+1 
ms)Writing region close event to WAL at 1731032934882Closed at 1731032934882 2024-11-08T02:28:54,884 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T02:28:54,884 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-08T02:28:54,884 DEBUG [RS:2;331c7316141f:41107 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T02:28:54,884 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-08T02:28:54,885 DEBUG [RS:2;331c7316141f:41107 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T02:28:54,885 DEBUG [RS:0;331c7316141f:43669 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T02:28:54,886 DEBUG [RS:0;331c7316141f:43669 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@af638ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=331c7316141f/172.17.0.2:0 2024-11-08T02:28:54,886 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T02:28:54,888 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-08T02:28:54,892 DEBUG [RS:1;331c7316141f:37539 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T02:28:54,892 DEBUG [RS:1;331c7316141f:37539 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T02:28:54,899 DEBUG [RS:0;331c7316141f:43669 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;331c7316141f:43669 2024-11-08T02:28:54,900 INFO [RS:0;331c7316141f:43669 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T02:28:54,900 INFO [RS:0;331c7316141f:43669 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T02:28:54,900 DEBUG [RS:0;331c7316141f:43669 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-08T02:28:54,905 INFO [RS:0;331c7316141f:43669 {}] regionserver.HRegionServer(2659): reportForDuty to master=331c7316141f,41661,1731032934163 with port=43669, startcode=1731032934337 2024-11-08T02:28:54,905 DEBUG [RS:0;331c7316141f:43669 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T02:28:54,905 DEBUG [RS:2;331c7316141f:41107 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T02:28:54,906 DEBUG [RS:1;331c7316141f:37539 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T02:28:54,906 DEBUG [RS:2;331c7316141f:41107 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@637558ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=331c7316141f/172.17.0.2:0 2024-11-08T02:28:54,906 DEBUG [RS:1;331c7316141f:37539 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d2a5167, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=331c7316141f/172.17.0.2:0 2024-11-08T02:28:54,911 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57851, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T02:28:54,912 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41661 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 331c7316141f,43669,1731032934337 2024-11-08T02:28:54,912 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41661 {}] master.ServerManager(517): Registering regionserver=331c7316141f,43669,1731032934337 2024-11-08T02:28:54,914 DEBUG [RS:0;331c7316141f:43669 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba 2024-11-08T02:28:54,914 DEBUG [RS:0;331c7316141f:43669 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33393 2024-11-08T02:28:54,915 DEBUG [RS:0;331c7316141f:43669 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T02:28:54,919 DEBUG [RS:1;331c7316141f:37539 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;331c7316141f:37539 2024-11-08T02:28:54,919 INFO [RS:1;331c7316141f:37539 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T02:28:54,919 INFO [RS:1;331c7316141f:37539 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T02:28:54,919 DEBUG [RS:1;331c7316141f:37539 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-08T02:28:54,919 DEBUG [RS:2;331c7316141f:41107 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;331c7316141f:41107 2024-11-08T02:28:54,920 INFO [RS:2;331c7316141f:41107 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T02:28:54,920 INFO [RS:2;331c7316141f:41107 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T02:28:54,920 DEBUG [RS:2;331c7316141f:41107 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-08T02:28:54,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T02:28:54,926 INFO [RS:2;331c7316141f:41107 {}] regionserver.HRegionServer(2659): reportForDuty to master=331c7316141f,41661,1731032934163 with port=41107, startcode=1731032934421 2024-11-08T02:28:54,926 INFO [RS:1;331c7316141f:37539 {}] regionserver.HRegionServer(2659): reportForDuty to master=331c7316141f,41661,1731032934163 with port=37539, startcode=1731032934380 2024-11-08T02:28:54,926 DEBUG [RS:1;331c7316141f:37539 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T02:28:54,927 DEBUG [RS:0;331c7316141f:43669 {}] zookeeper.ZKUtil(111): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/331c7316141f,43669,1731032934337 2024-11-08T02:28:54,927 WARN [RS:0;331c7316141f:43669 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-08T02:28:54,927 DEBUG [RS:2;331c7316141f:41107 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T02:28:54,927 INFO [RS:0;331c7316141f:43669 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T02:28:54,927 DEBUG [RS:0;331c7316141f:43669 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/WALs/331c7316141f,43669,1731032934337 2024-11-08T02:28:54,928 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [331c7316141f,43669,1731032934337] 2024-11-08T02:28:54,929 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34449, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T02:28:54,929 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40937, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T02:28:54,929 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41661 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 331c7316141f,41107,1731032934421 2024-11-08T02:28:54,930 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41661 {}] master.ServerManager(517): Registering regionserver=331c7316141f,41107,1731032934421 2024-11-08T02:28:54,932 INFO [RS:0;331c7316141f:43669 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T02:28:54,932 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41661 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 331c7316141f,37539,1731032934380 2024-11-08T02:28:54,932 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41661 {}] master.ServerManager(517): Registering regionserver=331c7316141f,37539,1731032934380 2024-11-08T02:28:54,932 DEBUG [RS:2;331c7316141f:41107 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba 2024-11-08T02:28:54,932 DEBUG [RS:2;331c7316141f:41107 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33393 2024-11-08T02:28:54,932 DEBUG [RS:2;331c7316141f:41107 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T02:28:54,935 DEBUG [RS:1;331c7316141f:37539 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba 2024-11-08T02:28:54,935 DEBUG [RS:1;331c7316141f:37539 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33393 2024-11-08T02:28:54,935 DEBUG [RS:1;331c7316141f:37539 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T02:28:54,935 INFO [RS:0;331c7316141f:43669 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T02:28:54,940 INFO [RS:0;331c7316141f:43669 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 
2024-11-08T02:28:54,940 INFO [RS:0;331c7316141f:43669 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:54,941 INFO [RS:0;331c7316141f:43669 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T02:28:54,942 INFO [RS:0;331c7316141f:43669 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T02:28:54,942 INFO [RS:0;331c7316141f:43669 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:54,942 DEBUG [RS:0;331c7316141f:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:54,942 DEBUG [RS:0;331c7316141f:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:54,942 DEBUG [RS:0;331c7316141f:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:54,942 DEBUG [RS:0;331c7316141f:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:54,942 DEBUG [RS:0;331c7316141f:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:54,942 DEBUG [RS:0;331c7316141f:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/331c7316141f:0, corePoolSize=2, maxPoolSize=2 2024-11-08T02:28:54,943 DEBUG [RS:0;331c7316141f:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:54,943 DEBUG [RS:0;331c7316141f:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:54,943 DEBUG [RS:0;331c7316141f:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:54,943 DEBUG [RS:0;331c7316141f:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:54,943 DEBUG [RS:0;331c7316141f:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:54,943 DEBUG [RS:0;331c7316141f:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:54,943 DEBUG [RS:0;331c7316141f:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/331c7316141f:0, corePoolSize=3, maxPoolSize=3 2024-11-08T02:28:54,943 DEBUG [RS:0;331c7316141f:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0, corePoolSize=3, maxPoolSize=3 2024-11-08T02:28:54,943 INFO [RS:0;331c7316141f:43669 {}] hbase.ChoreService(168): Chore ScheduledChore 
name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:54,944 INFO [RS:0;331c7316141f:43669 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:54,944 INFO [RS:0;331c7316141f:43669 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:54,944 INFO [RS:0;331c7316141f:43669 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:54,944 INFO [RS:0;331c7316141f:43669 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:54,944 INFO [RS:0;331c7316141f:43669 {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,43669,1731032934337-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T02:28:54,958 INFO [RS:0;331c7316141f:43669 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T02:28:54,959 INFO [RS:0;331c7316141f:43669 {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,43669,1731032934337-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:54,959 INFO [RS:0;331c7316141f:43669 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:54,959 INFO [RS:0;331c7316141f:43669 {}] regionserver.Replication(171): 331c7316141f,43669,1731032934337 started 2024-11-08T02:28:54,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T02:28:54,972 INFO [RS:0;331c7316141f:43669 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:54,972 INFO [RS:0;331c7316141f:43669 {}] regionserver.HRegionServer(1482): Serving as 331c7316141f,43669,1731032934337, RpcServer on 331c7316141f/172.17.0.2:43669, sessionid=0x101184662a70001 2024-11-08T02:28:54,972 DEBUG [RS:0;331c7316141f:43669 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T02:28:54,973 DEBUG [RS:0;331c7316141f:43669 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 331c7316141f,43669,1731032934337 2024-11-08T02:28:54,973 DEBUG [RS:0;331c7316141f:43669 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '331c7316141f,43669,1731032934337' 2024-11-08T02:28:54,973 DEBUG [RS:0;331c7316141f:43669 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T02:28:54,989 DEBUG [RS:2;331c7316141f:41107 {}] zookeeper.ZKUtil(111): regionserver:41107-0x101184662a70003, quorum=127.0.0.1:56230, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/331c7316141f,41107,1731032934421 2024-11-08T02:28:54,989 DEBUG [RS:0;331c7316141f:43669 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T02:28:54,989 WARN [RS:2;331c7316141f:41107 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-08T02:28:54,989 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [331c7316141f,41107,1731032934421] 2024-11-08T02:28:54,990 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [331c7316141f,37539,1731032934380] 2024-11-08T02:28:54,990 INFO [RS:2;331c7316141f:41107 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T02:28:54,990 DEBUG [RS:2;331c7316141f:41107 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/WALs/331c7316141f,41107,1731032934421 2024-11-08T02:28:54,990 DEBUG [RS:1;331c7316141f:37539 {}] zookeeper.ZKUtil(111): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/331c7316141f,37539,1731032934380 2024-11-08T02:28:54,990 WARN [RS:1;331c7316141f:37539 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T02:28:54,990 DEBUG [RS:0;331c7316141f:43669 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T02:28:54,990 DEBUG [RS:0;331c7316141f:43669 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T02:28:54,990 INFO [RS:1;331c7316141f:37539 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T02:28:54,990 DEBUG [RS:0;331c7316141f:43669 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 331c7316141f,43669,1731032934337 2024-11-08T02:28:54,990 DEBUG [RS:0;331c7316141f:43669 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '331c7316141f,43669,1731032934337' 2024-11-08T02:28:54,990 DEBUG [RS:1;331c7316141f:37539 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/WALs/331c7316141f,37539,1731032934380 2024-11-08T02:28:54,990 DEBUG [RS:0;331c7316141f:43669 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T02:28:54,991 DEBUG [RS:0;331c7316141f:43669 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T02:28:54,991 DEBUG [RS:0;331c7316141f:43669 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T02:28:54,992 INFO [RS:0;331c7316141f:43669 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T02:28:54,992 INFO [RS:0;331c7316141f:43669 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-08T02:28:54,995 INFO [RS:2;331c7316141f:41107 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T02:28:54,995 INFO [RS:1;331c7316141f:37539 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T02:28:55,000 INFO [RS:2;331c7316141f:41107 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T02:28:55,000 INFO [RS:2;331c7316141f:41107 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T02:28:55,001 INFO [RS:2;331c7316141f:41107 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,001 INFO [RS:2;331c7316141f:41107 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T02:28:55,002 INFO [RS:2;331c7316141f:41107 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T02:28:55,002 INFO [RS:2;331c7316141f:41107 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,002 DEBUG [RS:2;331c7316141f:41107 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,002 DEBUG [RS:2;331c7316141f:41107 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,002 DEBUG [RS:2;331c7316141f:41107 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,002 DEBUG [RS:2;331c7316141f:41107 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,002 DEBUG [RS:2;331c7316141f:41107 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,003 INFO [RS:1;331c7316141f:37539 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T02:28:55,003 DEBUG [RS:2;331c7316141f:41107 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/331c7316141f:0, corePoolSize=2, maxPoolSize=2 2024-11-08T02:28:55,003 DEBUG [RS:2;331c7316141f:41107 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,003 DEBUG [RS:2;331c7316141f:41107 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,003 DEBUG [RS:2;331c7316141f:41107 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,003 DEBUG [RS:2;331c7316141f:41107 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 
2024-11-08T02:28:55,003 INFO [RS:1;331c7316141f:37539 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T02:28:55,003 DEBUG [RS:2;331c7316141f:41107 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,003 INFO [RS:1;331c7316141f:37539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,003 DEBUG [RS:2;331c7316141f:41107 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,003 DEBUG [RS:2;331c7316141f:41107 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/331c7316141f:0, corePoolSize=3, maxPoolSize=3 2024-11-08T02:28:55,003 DEBUG [RS:2;331c7316141f:41107 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0, corePoolSize=3, maxPoolSize=3 2024-11-08T02:28:55,006 INFO [RS:1;331c7316141f:37539 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T02:28:55,007 INFO [RS:2;331c7316141f:41107 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,007 INFO [RS:2;331c7316141f:41107 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,007 INFO [RS:2;331c7316141f:41107 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,007 INFO [RS:2;331c7316141f:41107 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,007 INFO [RS:2;331c7316141f:41107 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,007 INFO [RS:2;331c7316141f:41107 {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,41107,1731032934421-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T02:28:55,007 INFO [RS:1;331c7316141f:37539 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T02:28:55,008 INFO [RS:1;331c7316141f:37539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-08T02:28:55,008 DEBUG [RS:1;331c7316141f:37539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,008 DEBUG [RS:1;331c7316141f:37539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,008 DEBUG [RS:1;331c7316141f:37539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,008 DEBUG [RS:1;331c7316141f:37539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,008 DEBUG [RS:1;331c7316141f:37539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,008 DEBUG [RS:1;331c7316141f:37539 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/331c7316141f:0, corePoolSize=2, maxPoolSize=2 2024-11-08T02:28:55,008 DEBUG [RS:1;331c7316141f:37539 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,008 DEBUG [RS:1;331c7316141f:37539 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,009 DEBUG [RS:1;331c7316141f:37539 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,009 DEBUG [RS:1;331c7316141f:37539 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,009 DEBUG [RS:1;331c7316141f:37539 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,009 DEBUG [RS:1;331c7316141f:37539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/331c7316141f:0, corePoolSize=1, maxPoolSize=1 2024-11-08T02:28:55,009 DEBUG [RS:1;331c7316141f:37539 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/331c7316141f:0, corePoolSize=3, maxPoolSize=3 2024-11-08T02:28:55,009 DEBUG [RS:1;331c7316141f:37539 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0, corePoolSize=3, maxPoolSize=3 2024-11-08T02:28:55,012 INFO [RS:1;331c7316141f:37539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,012 INFO [RS:1;331c7316141f:37539 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,012 INFO [RS:1;331c7316141f:37539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,012 INFO [RS:1;331c7316141f:37539 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-08T02:28:55,012 INFO [RS:1;331c7316141f:37539 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,012 INFO [RS:1;331c7316141f:37539 {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,37539,1731032934380-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T02:28:55,025 INFO [RS:1;331c7316141f:37539 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T02:28:55,026 INFO [RS:1;331c7316141f:37539 {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,37539,1731032934380-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,026 INFO [RS:1;331c7316141f:37539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,026 INFO [RS:1;331c7316141f:37539 {}] regionserver.Replication(171): 331c7316141f,37539,1731032934380 started 2024-11-08T02:28:55,026 INFO [RS:2;331c7316141f:41107 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T02:28:55,026 INFO [RS:2;331c7316141f:41107 {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,41107,1731032934421-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,026 INFO [RS:2;331c7316141f:41107 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,026 INFO [RS:2;331c7316141f:41107 {}] regionserver.Replication(171): 331c7316141f,41107,1731032934421 started 2024-11-08T02:28:55,038 WARN [331c7316141f:41661 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-08T02:28:55,039 INFO [RS:1;331c7316141f:37539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,039 INFO [RS:2;331c7316141f:41107 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-08T02:28:55,039 INFO [RS:2;331c7316141f:41107 {}] regionserver.HRegionServer(1482): Serving as 331c7316141f,41107,1731032934421, RpcServer on 331c7316141f/172.17.0.2:41107, sessionid=0x101184662a70003 2024-11-08T02:28:55,039 INFO [RS:1;331c7316141f:37539 {}] regionserver.HRegionServer(1482): Serving as 331c7316141f,37539,1731032934380, RpcServer on 331c7316141f/172.17.0.2:37539, sessionid=0x101184662a70002 2024-11-08T02:28:55,039 DEBUG [RS:1;331c7316141f:37539 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T02:28:55,039 DEBUG [RS:2;331c7316141f:41107 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T02:28:55,039 DEBUG [RS:1;331c7316141f:37539 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 331c7316141f,37539,1731032934380 2024-11-08T02:28:55,039 DEBUG [RS:2;331c7316141f:41107 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 331c7316141f,41107,1731032934421 2024-11-08T02:28:55,039 DEBUG [RS:1;331c7316141f:37539 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '331c7316141f,37539,1731032934380' 2024-11-08T02:28:55,039 DEBUG [RS:2;331c7316141f:41107 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '331c7316141f,41107,1731032934421' 2024-11-08T02:28:55,039 DEBUG [RS:1;331c7316141f:37539 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T02:28:55,039 DEBUG [RS:2;331c7316141f:41107 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T02:28:55,040 DEBUG [RS:1;331c7316141f:37539 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T02:28:55,040 DEBUG [RS:2;331c7316141f:41107 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T02:28:55,041 DEBUG [RS:2;331c7316141f:41107 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T02:28:55,041 DEBUG [RS:1;331c7316141f:37539 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T02:28:55,041 DEBUG [RS:2;331c7316141f:41107 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T02:28:55,041 DEBUG [RS:1;331c7316141f:37539 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T02:28:55,041 DEBUG [RS:2;331c7316141f:41107 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 331c7316141f,41107,1731032934421 2024-11-08T02:28:55,041 DEBUG [RS:1;331c7316141f:37539 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 331c7316141f,37539,1731032934380 2024-11-08T02:28:55,041 DEBUG [RS:2;331c7316141f:41107 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '331c7316141f,41107,1731032934421' 2024-11-08T02:28:55,041 DEBUG [RS:1;331c7316141f:37539 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '331c7316141f,37539,1731032934380' 2024-11-08T02:28:55,041 DEBUG [RS:1;331c7316141f:37539 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T02:28:55,041 DEBUG [RS:2;331c7316141f:41107 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T02:28:55,042 DEBUG [RS:1;331c7316141f:37539 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T02:28:55,042 DEBUG [RS:2;331c7316141f:41107 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T02:28:55,042 DEBUG [RS:1;331c7316141f:37539 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T02:28:55,042 INFO [RS:1;331c7316141f:37539 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T02:28:55,042 DEBUG [RS:2;331c7316141f:41107 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T02:28:55,042 INFO [RS:1;331c7316141f:37539 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-08T02:28:55,042 INFO [RS:2;331c7316141f:41107 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T02:28:55,042 INFO [RS:2;331c7316141f:41107 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-08T02:28:55,096 INFO [RS:0;331c7316141f:43669 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=331c7316141f%2C43669%2C1731032934337, suffix=, logDir=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/WALs/331c7316141f,43669,1731032934337, archiveDir=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/oldWALs, maxLogs=32 2024-11-08T02:28:55,101 INFO [RS:0;331c7316141f:43669 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 331c7316141f%2C43669%2C1731032934337.1731032935101 2024-11-08T02:28:55,110 INFO [RS:0;331c7316141f:43669 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/WALs/331c7316141f,43669,1731032934337/331c7316141f%2C43669%2C1731032934337.1731032935101 2024-11-08T02:28:55,111 DEBUG [RS:0;331c7316141f:43669 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45271:45271),(127.0.0.1/127.0.0.1:36097:36097),(127.0.0.1/127.0.0.1:45471:45471)] 2024-11-08T02:28:55,146 INFO [RS:1;331c7316141f:37539 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=331c7316141f%2C37539%2C1731032934380, suffix=, logDir=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/WALs/331c7316141f,37539,1731032934380, archiveDir=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/oldWALs, maxLogs=32 2024-11-08T02:28:55,146 INFO [RS:2;331c7316141f:41107 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=331c7316141f%2C41107%2C1731032934421, suffix=, logDir=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/WALs/331c7316141f,41107,1731032934421, archiveDir=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/oldWALs, maxLogs=32 2024-11-08T02:28:55,149 INFO [RS:2;331c7316141f:41107 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 331c7316141f%2C41107%2C1731032934421.1731032935149 2024-11-08T02:28:55,149 INFO [RS:1;331c7316141f:37539 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 331c7316141f%2C37539%2C1731032934380.1731032935149 2024-11-08T02:28:55,159 INFO [RS:2;331c7316141f:41107 
{}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/WALs/331c7316141f,41107,1731032934421/331c7316141f%2C41107%2C1731032934421.1731032935149 2024-11-08T02:28:55,159 INFO [RS:1;331c7316141f:37539 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/WALs/331c7316141f,37539,1731032934380/331c7316141f%2C37539%2C1731032934380.1731032935149 2024-11-08T02:28:55,161 DEBUG [RS:2;331c7316141f:41107 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45271:45271),(127.0.0.1/127.0.0.1:45471:45471),(127.0.0.1/127.0.0.1:36097:36097)] 2024-11-08T02:28:55,161 DEBUG [RS:1;331c7316141f:37539 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36097:36097),(127.0.0.1/127.0.0.1:45471:45471),(127.0.0.1/127.0.0.1:45271:45271)] 2024-11-08T02:28:55,289 DEBUG [331c7316141f:41661 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-08T02:28:55,289 DEBUG [331c7316141f:41661 {}] balancer.BalancerClusterState(204): Hosts are {331c7316141f=0} racks are {/default-rack=0} 2024-11-08T02:28:55,293 DEBUG [331c7316141f:41661 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-08T02:28:55,293 DEBUG [331c7316141f:41661 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-08T02:28:55,293 DEBUG [331c7316141f:41661 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-08T02:28:55,293 DEBUG [331c7316141f:41661 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-08T02:28:55,293 DEBUG [331c7316141f:41661 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-08T02:28:55,293 DEBUG [331c7316141f:41661 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-08T02:28:55,293 INFO [331c7316141f:41661 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-08T02:28:55,293 INFO [331c7316141f:41661 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-08T02:28:55,294 INFO [331c7316141f:41661 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-08T02:28:55,294 DEBUG [331c7316141f:41661 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-08T02:28:55,294 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=331c7316141f,43669,1731032934337 2024-11-08T02:28:55,297 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 331c7316141f,43669,1731032934337, state=OPENING 2024-11-08T02:28:55,379 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-08T02:28:55,389 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:55,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41107-0x101184662a70003, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:55,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:55,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:55,391 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T02:28:55,391 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T02:28:55,391 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T02:28:55,391 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T02:28:55,391 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=331c7316141f,43669,1731032934337}] 2024-11-08T02:28:55,391 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T02:28:55,549 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-08T02:28:55,554 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60857, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-08T02:28:55,562 INFO [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-08T02:28:55,563 INFO [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T02:28:55,567 INFO [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=331c7316141f%2C43669%2C1731032934337.meta, suffix=.meta, logDir=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/WALs/331c7316141f,43669,1731032934337, archiveDir=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/oldWALs, maxLogs=32 2024-11-08T02:28:55,568 INFO [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 331c7316141f%2C43669%2C1731032934337.meta.1731032935568.meta 2024-11-08T02:28:55,578 INFO [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/WALs/331c7316141f,43669,1731032934337/331c7316141f%2C43669%2C1731032934337.meta.1731032935568.meta 2024-11-08T02:28:55,583 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:45471:45471),(127.0.0.1/127.0.0.1:36097:36097),(127.0.0.1/127.0.0.1:45271:45271)] 2024-11-08T02:28:55,585 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-08T02:28:55,585 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-08T02:28:55,585 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-08T02:28:55,585 INFO [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-08T02:28:55,585 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-08T02:28:55,585 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T02:28:55,586 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-08T02:28:55,586 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-08T02:28:55,588 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T02:28:55,589 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T02:28:55,589 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:55,589 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T02:28:55,589 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T02:28:55,590 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T02:28:55,590 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:55,591 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T02:28:55,591 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T02:28:55,592 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T02:28:55,592 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:55,592 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T02:28:55,592 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T02:28:55,593 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T02:28:55,593 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:55,594 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T02:28:55,594 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T02:28:55,595 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740 2024-11-08T02:28:55,596 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740 2024-11-08T02:28:55,597 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T02:28:55,598 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T02:28:55,598 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-08T02:28:55,600 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T02:28:55,601 INFO [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68778774, jitterRate=0.02488359808921814}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T02:28:55,601 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-08T02:28:55,602 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731032935586Writing region info on filesystem at 1731032935586Initializing all the Stores at 1731032935587 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731032935587Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731032935587Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731032935587Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731032935587Cleaning up temporary data from old regions at 1731032935598 (+11 ms)Running coprocessor post-open hooks at 1731032935601 (+3 ms)Region opened successfully at 1731032935602 (+1 ms) 2024-11-08T02:28:55,604 INFO [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731032935549 2024-11-08T02:28:55,607 DEBUG [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-08T02:28:55,607 INFO [RS_OPEN_META-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-08T02:28:55,608 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=331c7316141f,43669,1731032934337 2024-11-08T02:28:55,610 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 331c7316141f,43669,1731032934337, state=OPEN 2024-11-08T02:28:55,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41107-0x101184662a70003, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T02:28:55,621 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T02:28:55,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T02:28:55,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T02:28:55,621 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=331c7316141f,43669,1731032934337 2024-11-08T02:28:55,621 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T02:28:55,621 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T02:28:55,621 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T02:28:55,621 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T02:28:55,626 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-08T02:28:55,626 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=331c7316141f,43669,1731032934337 in 230 msec 2024-11-08T02:28:55,631 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-08T02:28:55,631 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 742 msec 2024-11-08T02:28:55,632 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T02:28:55,632 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-08T02:28:55,634 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T02:28:55,634 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=331c7316141f,43669,1731032934337, seqNum=-1] 2024-11-08T02:28:55,635 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T02:28:55,637 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39021, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T02:28:55,646 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 819 msec 2024-11-08T02:28:55,646 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731032935646, completionTime=-1 2024-11-08T02:28:55,646 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-08T02:28:55,646 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-08T02:28:55,648 INFO [master/331c7316141f:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-08T02:28:55,648 INFO [master/331c7316141f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731032995648 2024-11-08T02:28:55,649 INFO [master/331c7316141f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731033055648 2024-11-08T02:28:55,649 INFO [master/331c7316141f:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-08T02:28:55,649 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-08T02:28:55,649 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,41661,1731032934163-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,650 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,41661,1731032934163-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,650 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,41661,1731032934163-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,650 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-331c7316141f:41661, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,650 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-08T02:28:55,650 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
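The 'Start fetching meta region location from registry' and 'The fetched meta region location is ...' entries above are the client-side half of the meta assignment that was just published to ZooKeeper. A minimal client-side sketch of the same lookup, assuming an hbase-site.xml for this mini cluster is on the classpath; the class name and printout are illustrative, not taken from the test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes the cluster's client settings are available
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
          // Prints the hosting region server, e.g. 331c7316141f,43669,1731032934337 in this run.
          System.out.println(loc.getServerName());
        }
      }
    }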
2024-11-08T02:28:55,653 DEBUG [master/331c7316141f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-08T02:28:55,656 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.171sec 2024-11-08T02:28:55,656 INFO [master/331c7316141f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-08T02:28:55,656 INFO [master/331c7316141f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-08T02:28:55,656 INFO [master/331c7316141f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-08T02:28:55,656 INFO [master/331c7316141f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-08T02:28:55,656 INFO [master/331c7316141f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-08T02:28:55,656 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,41661,1731032934163-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T02:28:55,656 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,41661,1731032934163-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-08T02:28:55,659 DEBUG [master/331c7316141f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-08T02:28:55,659 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-08T02:28:55,659 INFO [master/331c7316141f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=331c7316141f,41661,1731032934163-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
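'Quota support disabled' above is the default behaviour; MasterQuotaManager only does real work when quotas are switched on in the configuration before the master starts. A small sketch of flipping that switch, assuming the standard hbase.quota.enabled property and a test-local Configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.quota.enabled", true); // off by default, hence the log line above
        System.out.println(conf.getBoolean("hbase.quota.enabled", false));
      }
    }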
2024-11-08T02:28:55,756 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55afdbba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T02:28:55,756 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 331c7316141f,41661,-1 for getting cluster id 2024-11-08T02:28:55,756 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-08T02:28:55,760 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b1a47779-26b5-4fcb-addc-b9a2ffd7b32e' 2024-11-08T02:28:55,761 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-08T02:28:55,761 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b1a47779-26b5-4fcb-addc-b9a2ffd7b32e" 2024-11-08T02:28:55,762 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5956710b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T02:28:55,762 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [331c7316141f,41661,-1] 2024-11-08T02:28:55,762 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-08T02:28:55,763 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T02:28:55,766 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54508, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-08T02:28:55,768 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74e402b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T02:28:55,768 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T02:28:55,770 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=331c7316141f,43669,1731032934337, seqNum=-1] 2024-11-08T02:28:55,770 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T02:28:55,772 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34136, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T02:28:55,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=331c7316141f,41661,1731032934163 2024-11-08T02:28:55,776 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-08T02:28:55,777 DEBUG 
[RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 331c7316141f,41661,1731032934163 2024-11-08T02:28:55,777 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@58b0ce25 2024-11-08T02:28:55,778 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-08T02:28:55,780 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54516, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-08T02:28:55,781 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41661 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T02:28:55,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41661 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-08T02:28:55,786 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-08T02:28:55,787 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:55,787 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41661 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-08T02:28:55,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T02:28:55,789 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-08T02:28:55,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38033 is added to blk_1073741837_1013 (size=392) 2024-11-08T02:28:55,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741837_1013 (size=392) 2024-11-08T02:28:55,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741837_1013 (size=392) 2024-11-08T02:28:55,805 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 42655aef6d51459f9e608f5353bbcafc, NAME => 'TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba 2024-11-08T02:28:55,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38033 is added to blk_1073741838_1014 (size=51) 2024-11-08T02:28:55,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741838_1014 (size=51) 2024-11-08T02:28:55,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741838_1014 (size=51) 2024-11-08T02:28:55,815 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T02:28:55,815 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 42655aef6d51459f9e608f5353bbcafc, disabling compactions & flushes 2024-11-08T02:28:55,815 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc. 2024-11-08T02:28:55,815 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc. 2024-11-08T02:28:55,815 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc. after waiting 0 ms 2024-11-08T02:28:55,815 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc. 2024-11-08T02:28:55,815 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc. 2024-11-08T02:28:55,815 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 42655aef6d51459f9e608f5353bbcafc: Waiting for close lock at 1731032935815Disabling compacts and flushes for region at 1731032935815Disabling writes for close at 1731032935815Writing region close event to WAL at 1731032935815Closed at 1731032935815 2024-11-08T02:28:55,817 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-08T02:28:55,818 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731032935817"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731032935817"}]},"ts":"1731032935817"} 2024-11-08T02:28:55,820 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
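The create request above (create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', ...}) and the CreateTableProcedure steps that follow are what an ordinary Admin call produces. A minimal client-side sketch that would issue an equivalent request, assuming an already open Connection (conn); this is an illustration, not the test's own code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableSketch {
      // Builds a one-family descriptor matching the request above and submits it.
      static void createTable(Connection conn) throws Exception {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
            .setRegionReplication(1)                                 // REGION_REPLICATION => '1'
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")) // NAME => 'cf', defaults otherwise
            .build();
        try (Admin admin = conn.getAdmin()) {
          admin.createTable(td); // drives the pid=4 CreateTableProcedure seen in this log
        }
      }
    }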
2024-11-08T02:28:55,822 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-08T02:28:55,822 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731032935822"}]},"ts":"1731032935822"} 2024-11-08T02:28:55,825 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-08T02:28:55,825 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {331c7316141f=0} racks are {/default-rack=0} 2024-11-08T02:28:55,826 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-08T02:28:55,826 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-08T02:28:55,826 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-08T02:28:55,826 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-08T02:28:55,826 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-08T02:28:55,826 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-08T02:28:55,826 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-08T02:28:55,826 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-08T02:28:55,826 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-08T02:28:55,826 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-08T02:28:55,827 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=42655aef6d51459f9e608f5353bbcafc, ASSIGN}] 2024-11-08T02:28:55,829 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=42655aef6d51459f9e608f5353bbcafc, ASSIGN 2024-11-08T02:28:55,830 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=42655aef6d51459f9e608f5353bbcafc, ASSIGN; state=OFFLINE, location=331c7316141f,41107,1731032934421; forceNewPlan=false, retain=false 2024-11-08T02:28:55,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T02:28:55,981 INFO [331c7316141f:41661 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-08T02:28:55,981 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=42655aef6d51459f9e608f5353bbcafc, regionState=OPENING, regionLocation=331c7316141f,41107,1731032934421 2024-11-08T02:28:55,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=42655aef6d51459f9e608f5353bbcafc, ASSIGN because future has completed 2024-11-08T02:28:55,989 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 42655aef6d51459f9e608f5353bbcafc, server=331c7316141f,41107,1731032934421}] 2024-11-08T02:28:56,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T02:28:56,146 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-08T02:28:56,148 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54395, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-08T02:28:56,152 INFO [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc. 2024-11-08T02:28:56,152 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 42655aef6d51459f9e608f5353bbcafc, NAME => 'TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc.', STARTKEY => '', ENDKEY => ''} 2024-11-08T02:28:56,152 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 42655aef6d51459f9e608f5353bbcafc 2024-11-08T02:28:56,152 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T02:28:56,153 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 42655aef6d51459f9e608f5353bbcafc 2024-11-08T02:28:56,153 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 42655aef6d51459f9e608f5353bbcafc 2024-11-08T02:28:56,155 INFO [StoreOpener-42655aef6d51459f9e608f5353bbcafc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 42655aef6d51459f9e608f5353bbcafc 2024-11-08T02:28:56,156 INFO [StoreOpener-42655aef6d51459f9e608f5353bbcafc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 42655aef6d51459f9e608f5353bbcafc columnFamilyName cf 2024-11-08T02:28:56,156 DEBUG [StoreOpener-42655aef6d51459f9e608f5353bbcafc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T02:28:56,157 INFO [StoreOpener-42655aef6d51459f9e608f5353bbcafc-1 {}] regionserver.HStore(327): Store=42655aef6d51459f9e608f5353bbcafc/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T02:28:56,157 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 42655aef6d51459f9e608f5353bbcafc 2024-11-08T02:28:56,158 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/default/TestHBaseWalOnEC/42655aef6d51459f9e608f5353bbcafc 2024-11-08T02:28:56,158 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/default/TestHBaseWalOnEC/42655aef6d51459f9e608f5353bbcafc 2024-11-08T02:28:56,159 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 42655aef6d51459f9e608f5353bbcafc 2024-11-08T02:28:56,159 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 42655aef6d51459f9e608f5353bbcafc 2024-11-08T02:28:56,161 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 42655aef6d51459f9e608f5353bbcafc 2024-11-08T02:28:56,164 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/default/TestHBaseWalOnEC/42655aef6d51459f9e608f5353bbcafc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T02:28:56,164 INFO [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 42655aef6d51459f9e608f5353bbcafc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68371063, jitterRate=0.018808230757713318}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-08T02:28:56,164 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 42655aef6d51459f9e608f5353bbcafc 2024-11-08T02:28:56,165 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 42655aef6d51459f9e608f5353bbcafc: Running coprocessor pre-open hook at 1731032936153Writing region info on filesystem at 1731032936153Initializing all the Stores at 1731032936154 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731032936154Cleaning up temporary data from old regions at 1731032936159 (+5 ms)Running coprocessor post-open hooks at 1731032936164 (+5 ms)Region opened successfully at 1731032936165 (+1 ms) 2024-11-08T02:28:56,167 INFO [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc., pid=6, masterSystemTime=1731032936146 2024-11-08T02:28:56,170 DEBUG [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc. 2024-11-08T02:28:56,170 INFO [RS_OPEN_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc. 2024-11-08T02:28:56,171 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=42655aef6d51459f9e608f5353bbcafc, regionState=OPEN, openSeqNum=2, regionLocation=331c7316141f,41107,1731032934421 2024-11-08T02:28:56,174 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 42655aef6d51459f9e608f5353bbcafc, server=331c7316141f,41107,1731032934421 because future has completed 2024-11-08T02:28:56,179 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-08T02:28:56,179 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 42655aef6d51459f9e608f5353bbcafc, server=331c7316141f,41107,1731032934421 in 187 msec 2024-11-08T02:28:56,183 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-08T02:28:56,183 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=42655aef6d51459f9e608f5353bbcafc, ASSIGN in 353 msec 2024-11-08T02:28:56,184 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-08T02:28:56,185 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731032936184"}]},"ts":"1731032936184"} 2024-11-08T02:28:56,187 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-08T02:28:56,189 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-08T02:28:56,192 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 408 msec 2024-11-08T02:28:56,320 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-08T02:28:56,321 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-08T02:28:56,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T02:28:56,329 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-08T02:28:56,329 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-08T02:28:56,329 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-08T02:28:56,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestHBaseWalOnEC 2024-11-08T02:28:56,330 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestHBaseWalOnEC Metrics about Tables on a single HBase RegionServer 2024-11-08T02:28:56,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T02:28:56,419 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-08T02:28:56,419 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-08T02:28:56,419 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T02:28:56,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-08T02:28:56,422 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T02:28:56,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
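The HBaseTestingUtil lines above ('Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms' through 'All regions for table TestHBaseWalOnEC assigned.') are the utility's assignment wait. A minimal sketch of reaching that state from a test, assuming a HBaseTestingUtil field named UTIL whose mini cluster is already running:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;

    public class WaitAssignedSketch {
      static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); // illustrative field, not the test's

      static void waitForTable() throws Exception {
        // Blocks until every region of the table is assigned, with the 60000 ms
        // timeout seen in the log line above.
        UTIL.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"));
      }
    }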
2024-11-08T02:28:56,426 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc., hostname=331c7316141f,41107,1731032934421, seqNum=2] 2024-11-08T02:28:56,426 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T02:28:56,428 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47822, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T02:28:56,432 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41661 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-08T02:28:56,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41661 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-08T02:28:56,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T02:28:56,436 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-08T02:28:56,438 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-08T02:28:56,438 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-08T02:28:56,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T02:28:56,594 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41107 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-08T02:28:56,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc. 
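The flush request above (Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC) and the single-cell flush that follows (key is row/cf:cq) correspond to an ordinary client put followed by an admin flush. A minimal sketch of that sequence, assuming an open Connection (conn); the row, family and qualifier mirror the cell visible below, while the value is a placeholder since the actual payload is not shown in this log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlushSketch {
      static void putAndFlush(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        try (Table table = conn.getTable(tn); Admin admin = conn.getAdmin()) {
          Put put = new Put(Bytes.toBytes("row"));
          // "value" is illustrative; only row/cf:cq is visible in this log.
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value"));
          table.put(put);   // the small memstore payload flushed below
          admin.flush(tn);  // drives FlushTableProcedure pid=7 / FlushRegionProcedure pid=8
        }
      }
    }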
2024-11-08T02:28:56,595 INFO [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 42655aef6d51459f9e608f5353bbcafc 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-08T02:28:56,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/default/TestHBaseWalOnEC/42655aef6d51459f9e608f5353bbcafc/.tmp/cf/d76d67c515034cdd904da7fe5ab7b930 is 36, key is row/cf:cq/1731032936429/Put/seqid=0 2024-11-08T02:28:56,621 WARN [IPC Server handler 4 on default port 33393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-08T02:28:56,621 WARN [IPC Server handler 4 on default port 33393 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-08T02:28:56,621 WARN [IPC Server handler 4 on default port 33393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-08T02:28:56,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741839_1015 (size=4787) 2024-11-08T02:28:56,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741839_1015 (size=4787) 2024-11-08T02:28:56,628 INFO [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/default/TestHBaseWalOnEC/42655aef6d51459f9e608f5353bbcafc/.tmp/cf/d76d67c515034cdd904da7fe5ab7b930 2024-11-08T02:28:56,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/default/TestHBaseWalOnEC/42655aef6d51459f9e608f5353bbcafc/.tmp/cf/d76d67c515034cdd904da7fe5ab7b930 as hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/default/TestHBaseWalOnEC/42655aef6d51459f9e608f5353bbcafc/cf/d76d67c515034cdd904da7fe5ab7b930 2024-11-08T02:28:56,644 INFO [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/default/TestHBaseWalOnEC/42655aef6d51459f9e608f5353bbcafc/cf/d76d67c515034cdd904da7fe5ab7b930, entries=1, sequenceid=5, filesize=4.7 K 2024-11-08T02:28:56,645 INFO [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 42655aef6d51459f9e608f5353bbcafc in 50ms, sequenceid=5, compaction requested=false 2024-11-08T02:28:56,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 42655aef6d51459f9e608f5353bbcafc: 2024-11-08T02:28:56,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc. 2024-11-08T02:28:56,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/331c7316141f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-08T02:28:56,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41661 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-08T02:28:56,652 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-08T02:28:56,652 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 210 msec 2024-11-08T02:28:56,656 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 221 msec 2024-11-08T02:28:56,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T02:28:56,750 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-08T02:28:56,758 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-08T02:28:56,758 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
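TestHBaseWalOnEC exercises WALs and store files written to an erasure-coded HDFS directory, which is the context for the BlockPlacementPolicy warnings a few lines up. A minimal sketch of putting a directory under an EC policy on Hadoop 3, assuming a DistributedFileSystem handle (fs) and the built-in RS-3-2-1024k policy name; it illustrates the mechanism rather than the test's actual setup:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class EcPolicySketch {
      static void applyEc(DistributedFileSystem fs, Path dir) throws Exception {
        // Enable a built-in policy cluster-wide, then apply it to the directory so that
        // files created under it (for example WALs or store files) are erasure coded.
        fs.enableErasureCodingPolicy("RS-3-2-1024k");
        fs.setErasureCodingPolicy(dir, "RS-3-2-1024k");
      }
    }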
2024-11-08T02:28:56,759 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T02:28:56,759 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
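The call stack above shows the teardown path: TestHBaseWalOnEC.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which produces the cluster shutdown messages that follow. A minimal JUnit 4 sketch of such a teardown hook, assuming the same illustrative UTIL field as earlier; the rest of the test body is not reproduced here:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class TearDownSketch {
      static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); // illustrative field, not the test's

      @After
      public void tearDown() throws Exception {
        // Stops HBase (master and region servers) and the backing mini DFS/ZK, as logged below.
        UTIL.shutdownMiniCluster();
      }
    }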
2024-11-08T02:28:56,759 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T02:28:56,759 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-08T02:28:56,759 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-08T02:28:56,760 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1933268460, stopped=false 2024-11-08T02:28:56,760 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=331c7316141f,41661,1731032934163 2024-11-08T02:28:56,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41107-0x101184662a70003, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T02:28:56,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T02:28:56,810 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T02:28:56,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T02:28:56,811 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:56,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41107-0x101184662a70003, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:56,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:56,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:56,811 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T02:28:56,812 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-08T02:28:56,812 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41107-0x101184662a70003, quorum=127.0.0.1:56230, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T02:28:56,812 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T02:28:56,812 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T02:28:56,812 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at 
org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T02:28:56,812 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T02:28:56,813 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T02:28:56,814 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '331c7316141f,43669,1731032934337' ***** 2024-11-08T02:28:56,814 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T02:28:56,814 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '331c7316141f,37539,1731032934380' ***** 2024-11-08T02:28:56,814 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T02:28:56,814 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '331c7316141f,41107,1731032934421' ***** 2024-11-08T02:28:56,814 INFO [RS:0;331c7316141f:43669 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T02:28:56,814 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T02:28:56,815 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T02:28:56,815 INFO [RS:1;331c7316141f:37539 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T02:28:56,815 INFO [RS:0;331c7316141f:43669 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T02:28:56,815 INFO [RS:0;331c7316141f:43669 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-08T02:28:56,815 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T02:28:56,815 INFO [RS:1;331c7316141f:37539 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T02:28:56,815 INFO [RS:2;331c7316141f:41107 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T02:28:56,815 INFO [RS:0;331c7316141f:43669 {}] regionserver.HRegionServer(959): stopping server 331c7316141f,43669,1731032934337 2024-11-08T02:28:56,815 INFO [RS:1;331c7316141f:37539 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-08T02:28:56,815 INFO [RS:0;331c7316141f:43669 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T02:28:56,815 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T02:28:56,815 INFO [RS:2;331c7316141f:41107 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T02:28:56,815 INFO [RS:1;331c7316141f:37539 {}] regionserver.HRegionServer(959): stopping server 331c7316141f,37539,1731032934380 2024-11-08T02:28:56,815 INFO [RS:0;331c7316141f:43669 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;331c7316141f:43669. 2024-11-08T02:28:56,815 INFO [RS:2;331c7316141f:41107 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-08T02:28:56,815 INFO [RS:1;331c7316141f:37539 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T02:28:56,815 INFO [RS:2;331c7316141f:41107 {}] regionserver.HRegionServer(3091): Received CLOSE for 42655aef6d51459f9e608f5353bbcafc 2024-11-08T02:28:56,815 INFO [RS:1;331c7316141f:37539 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;331c7316141f:37539. 2024-11-08T02:28:56,816 DEBUG [RS:1;331c7316141f:37539 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T02:28:56,816 DEBUG [RS:1;331c7316141f:37539 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T02:28:56,816 INFO [RS:2;331c7316141f:41107 {}] regionserver.HRegionServer(959): stopping server 331c7316141f,41107,1731032934421 2024-11-08T02:28:56,816 DEBUG [RS:0;331c7316141f:43669 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at 
org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T02:28:56,816 INFO [RS:2;331c7316141f:41107 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T02:28:56,816 INFO [RS:1;331c7316141f:37539 {}] regionserver.HRegionServer(976): stopping server 331c7316141f,37539,1731032934380; all regions closed. 2024-11-08T02:28:56,816 DEBUG [RS:0;331c7316141f:43669 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T02:28:56,816 INFO [RS:2;331c7316141f:41107 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;331c7316141f:41107. 2024-11-08T02:28:56,816 DEBUG [RS_CLOSE_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 42655aef6d51459f9e608f5353bbcafc, disabling compactions & flushes 2024-11-08T02:28:56,816 INFO [RS:0;331c7316141f:43669 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T02:28:56,816 INFO [RS:0;331c7316141f:43669 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T02:28:56,816 INFO [RS_CLOSE_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc. 
2024-11-08T02:28:56,816 DEBUG [RS:2;331c7316141f:41107 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T02:28:56,816 INFO [RS:0;331c7316141f:43669 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-08T02:28:56,816 DEBUG [RS_CLOSE_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc. 2024-11-08T02:28:56,816 DEBUG [RS:2;331c7316141f:41107 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T02:28:56,817 INFO [RS:0;331c7316141f:43669 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-08T02:28:56,817 INFO [RS:2;331c7316141f:41107 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-08T02:28:56,817 DEBUG [RS_CLOSE_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc. after waiting 0 ms 2024-11-08T02:28:56,817 DEBUG [RS:2;331c7316141f:41107 {}] regionserver.HRegionServer(1325): Online Regions={42655aef6d51459f9e608f5353bbcafc=TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc.} 2024-11-08T02:28:56,817 DEBUG [RS_CLOSE_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc. 
2024-11-08T02:28:56,817 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:56,817 DEBUG [RS:2;331c7316141f:41107 {}] regionserver.HRegionServer(1351): Waiting on 42655aef6d51459f9e608f5353bbcafc 2024-11-08T02:28:56,817 INFO [RS:0;331c7316141f:43669 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-08T02:28:56,817 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:56,817 DEBUG [RS:0;331c7316141f:43669 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-08T02:28:56,817 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:56,817 DEBUG [RS:0;331c7316141f:43669 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-08T02:28:56,817 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T02:28:56,817 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:56,817 INFO [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T02:28:56,818 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T02:28:56,818 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:56,818 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T02:28:56,818 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T02:28:56,818 INFO [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-08T02:28:56,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38033 is added to blk_1073741835_1011 (size=93) 2024-11-08T02:28:56,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741835_1011 (size=93) 2024-11-08T02:28:56,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741835_1011 (size=93) 2024-11-08T02:28:56,823 DEBUG [RS:1;331c7316141f:37539 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/oldWALs 2024-11-08T02:28:56,823 INFO [RS:1;331c7316141f:37539 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 331c7316141f%2C37539%2C1731032934380:(num 1731032935149) 2024-11-08T02:28:56,823 DEBUG [RS_CLOSE_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/default/TestHBaseWalOnEC/42655aef6d51459f9e608f5353bbcafc/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-08T02:28:56,823 DEBUG [RS:1;331c7316141f:37539 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T02:28:56,823 INFO [RS:1;331c7316141f:37539 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T02:28:56,824 INFO [RS:1;331c7316141f:37539 {}] 
hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T02:28:56,824 INFO [RS:1;331c7316141f:37539 {}] hbase.ChoreService(370): Chore service for: regionserver/331c7316141f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-08T02:28:56,824 INFO [RS:1;331c7316141f:37539 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T02:28:56,824 INFO [regionserver/331c7316141f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-08T02:28:56,824 INFO [RS:1;331c7316141f:37539 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T02:28:56,824 INFO [RS:1;331c7316141f:37539 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-08T02:28:56,824 INFO [RS:1;331c7316141f:37539 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T02:28:56,824 INFO [RS:1;331c7316141f:37539 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37539 2024-11-08T02:28:56,824 INFO [RS_CLOSE_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc. 2024-11-08T02:28:56,825 DEBUG [RS_CLOSE_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 42655aef6d51459f9e608f5353bbcafc: Waiting for close lock at 1731032936816Running coprocessor pre-close hooks at 1731032936816Disabling compacts and flushes for region at 1731032936816Disabling writes for close at 1731032936817 (+1 ms)Writing region close event to WAL at 1731032936818 (+1 ms)Running coprocessor post-close hooks at 1731032936824 (+6 ms)Closed at 1731032936824 2024-11-08T02:28:56,825 DEBUG [RS_CLOSE_REGION-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc. 
2024-11-08T02:28:56,831 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/331c7316141f,37539,1731032934380 2024-11-08T02:28:56,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T02:28:56,831 INFO [RS:1;331c7316141f:37539 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T02:28:56,836 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740/.tmp/info/15bed7f6042a4f16abb3990e161c7678 is 153, key is TestHBaseWalOnEC,,1731032935781.42655aef6d51459f9e608f5353bbcafc./info:regioninfo/1731032936171/Put/seqid=0 2024-11-08T02:28:56,837 WARN [IPC Server handler 1 on default port 33393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-08T02:28:56,837 WARN [IPC Server handler 1 on default port 33393 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-08T02:28:56,838 WARN [IPC Server handler 1 on default port 33393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-08T02:28:56,842 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [331c7316141f,37539,1731032934380] 2024-11-08T02:28:56,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741840_1016 (size=6637) 2024-11-08T02:28:56,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741840_1016 (size=6637) 2024-11-08T02:28:56,843 INFO [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740/.tmp/info/15bed7f6042a4f16abb3990e161c7678 2024-11-08T02:28:56,846 INFO [regionserver/331c7316141f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T02:28:56,852 DEBUG 
[RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/331c7316141f,37539,1731032934380 already deleted, retry=false 2024-11-08T02:28:56,852 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 331c7316141f,37539,1731032934380 expired; onlineServers=2 2024-11-08T02:28:56,866 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740/.tmp/ns/d9f3b284e5a94351ad83a11e713469d1 is 43, key is default/ns:d/1731032935638/Put/seqid=0 2024-11-08T02:28:56,867 WARN [IPC Server handler 3 on default port 33393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-08T02:28:56,867 WARN [IPC Server handler 3 on default port 33393 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-08T02:28:56,867 WARN [IPC Server handler 3 on default port 33393 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-08T02:28:56,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741841_1017 (size=5153) 2024-11-08T02:28:56,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741841_1017 (size=5153) 2024-11-08T02:28:56,873 INFO [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740/.tmp/ns/d9f3b284e5a94351ad83a11e713469d1 2024-11-08T02:28:56,901 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740/.tmp/table/ce64b185431249dfbf83490c163aef2f is 52, key is TestHBaseWalOnEC/table:state/1731032936184/Put/seqid=0 2024-11-08T02:28:56,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741842_1018 (size=5249) 2024-11-08T02:28:56,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741842_1018 (size=5249) 
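The repeated BlockPlacementPolicyDefault warnings above ("Failed to place enough replicas, still in need of 1 to reach 3") point at their own remedy: enabling DEBUG on the two classes they name, org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology, to see why a target could not be chosen. A minimal sketch of how that could look in a Log4j2 properties-style configuration follows; the properties format and the logger key names (blockPlacement, netTopology) are assumptions for illustration, while the two class names come from the warning itself.

# Hypothetical log4j2.properties fragment (illustrative key names): surface the
# placement decisions behind the "Failed to place enough replicas" warnings.
logger.blockPlacement.name = org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy
logger.blockPlacement.level = DEBUG
logger.netTopology.name = org.apache.hadoop.net.NetworkTopology
logger.netTopology.level = DEBUG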
2024-11-08T02:28:56,909 INFO [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740/.tmp/table/ce64b185431249dfbf83490c163aef2f 2024-11-08T02:28:56,909 INFO [regionserver/331c7316141f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T02:28:56,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38033 is added to blk_1073741842_1018 (size=5249) 2024-11-08T02:28:56,914 INFO [regionserver/331c7316141f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T02:28:56,919 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740/.tmp/info/15bed7f6042a4f16abb3990e161c7678 as hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740/info/15bed7f6042a4f16abb3990e161c7678 2024-11-08T02:28:56,929 INFO [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740/info/15bed7f6042a4f16abb3990e161c7678, entries=10, sequenceid=11, filesize=6.5 K 2024-11-08T02:28:56,930 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740/.tmp/ns/d9f3b284e5a94351ad83a11e713469d1 as hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740/ns/d9f3b284e5a94351ad83a11e713469d1 2024-11-08T02:28:56,937 INFO [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740/ns/d9f3b284e5a94351ad83a11e713469d1, entries=2, sequenceid=11, filesize=5.0 K 2024-11-08T02:28:56,939 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740/.tmp/table/ce64b185431249dfbf83490c163aef2f as hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740/table/ce64b185431249dfbf83490c163aef2f 2024-11-08T02:28:56,942 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T02:28:56,942 INFO [RS:1;331c7316141f:37539 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T02:28:56,942 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37539-0x101184662a70002, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T02:28:56,942 INFO [RS:1;331c7316141f:37539 {}] regionserver.HRegionServer(1031): Exiting; 
stopping=331c7316141f,37539,1731032934380; zookeeper connection closed. 2024-11-08T02:28:56,942 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@48fc85b2 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@48fc85b2 2024-11-08T02:28:56,948 INFO [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740/table/ce64b185431249dfbf83490c163aef2f, entries=2, sequenceid=11, filesize=5.1 K 2024-11-08T02:28:56,949 INFO [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 131ms, sequenceid=11, compaction requested=false 2024-11-08T02:28:56,955 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-08T02:28:56,956 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T02:28:56,956 INFO [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T02:28:56,956 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731032936817Running coprocessor pre-close hooks at 1731032936817Disabling compacts and flushes for region at 1731032936817Disabling writes for close at 1731032936818 (+1 ms)Obtaining lock to block concurrent updates at 1731032936818Preparing flush snapshotting stores in 1588230740 at 1731032936818Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731032936818Flushing stores of hbase:meta,,1.1588230740 at 1731032936820 (+2 ms)Flushing 1588230740/info: creating writer at 1731032936820Flushing 1588230740/info: appending metadata at 1731032936836 (+16 ms)Flushing 1588230740/info: closing flushed file at 1731032936836Flushing 1588230740/ns: creating writer at 1731032936850 (+14 ms)Flushing 1588230740/ns: appending metadata at 1731032936865 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731032936865Flushing 1588230740/table: creating writer at 1731032936882 (+17 ms)Flushing 1588230740/table: appending metadata at 1731032936900 (+18 ms)Flushing 1588230740/table: closing flushed file at 1731032936900Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1761162f: reopening flushed file at 1731032936918 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@39b30a7f: reopening flushed file at 1731032936929 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@475d4d08: reopening flushed file at 1731032936938 (+9 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 131ms, sequenceid=11, compaction requested=false at 1731032936949 (+11 ms)Writing region close event to WAL at 1731032936950 
(+1 ms)Running coprocessor post-close hooks at 1731032936956 (+6 ms)Closed at 1731032936956 2024-11-08T02:28:56,956 DEBUG [RS_CLOSE_META-regionserver/331c7316141f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-08T02:28:57,017 INFO [RS:2;331c7316141f:41107 {}] regionserver.HRegionServer(976): stopping server 331c7316141f,41107,1731032934421; all regions closed. 2024-11-08T02:28:57,017 INFO [RS:0;331c7316141f:43669 {}] regionserver.HRegionServer(976): stopping server 331c7316141f,43669,1731032934337; all regions closed. 2024-11-08T02:28:57,018 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:57,018 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:57,018 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:57,018 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:57,018 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:57,018 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:57,018 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:57,018 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:57,018 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:57,019 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:57,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38033 is added to blk_1073741836_1012 (size=2751) 2024-11-08T02:28:57,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741834_1010 (size=1298) 2024-11-08T02:28:57,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741834_1010 (size=1298) 2024-11-08T02:28:57,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741836_1012 (size=2751) 2024-11-08T02:28:57,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38033 is added to blk_1073741834_1010 (size=1298) 2024-11-08T02:28:57,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741836_1012 (size=2751) 2024-11-08T02:28:57,026 DEBUG [RS:0;331c7316141f:43669 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/oldWALs 2024-11-08T02:28:57,026 INFO [RS:0;331c7316141f:43669 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 331c7316141f%2C43669%2C1731032934337.meta:.meta(num 1731032935568) 2024-11-08T02:28:57,026 DEBUG [RS:2;331c7316141f:41107 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/oldWALs 2024-11-08T02:28:57,026 INFO [RS:2;331c7316141f:41107 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 331c7316141f%2C41107%2C1731032934421:(num 1731032935149) 2024-11-08T02:28:57,026 DEBUG [RS:2;331c7316141f:41107 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T02:28:57,026 INFO [RS:2;331c7316141f:41107 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T02:28:57,026 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:57,026 INFO [RS:2;331c7316141f:41107 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 
2024-11-08T02:28:57,026 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:57,026 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:57,027 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:57,027 INFO [RS:2;331c7316141f:41107 {}] hbase.ChoreService(370): Chore service for: regionserver/331c7316141f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T02:28:57,027 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:57,027 INFO [RS:2;331c7316141f:41107 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T02:28:57,027 INFO [RS:2;331c7316141f:41107 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T02:28:57,027 INFO [RS:2;331c7316141f:41107 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-08T02:28:57,027 INFO [RS:2;331c7316141f:41107 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T02:28:57,027 INFO [RS:2;331c7316141f:41107 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41107 2024-11-08T02:28:57,028 INFO [regionserver/331c7316141f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-08T02:28:57,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741833_1009 (size=93) 2024-11-08T02:28:57,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741833_1009 (size=93) 2024-11-08T02:28:57,030 INFO [regionserver/331c7316141f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-08T02:28:57,030 INFO [regionserver/331c7316141f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-08T02:28:57,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38033 is added to blk_1073741833_1009 (size=93) 2024-11-08T02:28:57,033 DEBUG [RS:0;331c7316141f:43669 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/oldWALs 2024-11-08T02:28:57,033 INFO [RS:0;331c7316141f:43669 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 331c7316141f%2C43669%2C1731032934337:(num 1731032935101) 2024-11-08T02:28:57,033 DEBUG [RS:0;331c7316141f:43669 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T02:28:57,033 INFO [RS:0;331c7316141f:43669 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T02:28:57,034 INFO [RS:0;331c7316141f:43669 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T02:28:57,034 INFO [RS:0;331c7316141f:43669 {}] hbase.ChoreService(370): Chore service for: regionserver/331c7316141f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T02:28:57,034 INFO [RS:0;331c7316141f:43669 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T02:28:57,034 INFO [regionserver/331c7316141f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-08T02:28:57,034 INFO [RS:0;331c7316141f:43669 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43669 2024-11-08T02:28:57,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T02:28:57,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41107-0x101184662a70003, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/331c7316141f,41107,1731032934421 2024-11-08T02:28:57,094 INFO [RS:2;331c7316141f:41107 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T02:28:57,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/331c7316141f,43669,1731032934337 2024-11-08T02:28:57,105 INFO [RS:0;331c7316141f:43669 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T02:28:57,115 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [331c7316141f,43669,1731032934337] 2024-11-08T02:28:57,136 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/331c7316141f,43669,1731032934337 already deleted, retry=false 2024-11-08T02:28:57,136 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 331c7316141f,43669,1731032934337 expired; onlineServers=1 2024-11-08T02:28:57,137 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [331c7316141f,41107,1731032934421] 2024-11-08T02:28:57,147 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/331c7316141f,41107,1731032934421 already deleted, retry=false 2024-11-08T02:28:57,147 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 331c7316141f,41107,1731032934421 expired; onlineServers=0 2024-11-08T02:28:57,147 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '331c7316141f,41661,1731032934163' ***** 2024-11-08T02:28:57,147 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-08T02:28:57,147 INFO [M:0;331c7316141f:41661 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T02:28:57,147 INFO [M:0;331c7316141f:41661 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T02:28:57,147 DEBUG [M:0;331c7316141f:41661 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-08T02:28:57,147 DEBUG [M:0;331c7316141f:41661 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-08T02:28:57,147 DEBUG [master/331c7316141f:0:becomeActiveMaster-HFileCleaner.large.0-1731032934838 {}] cleaner.HFileCleaner(306): Exit Thread[master/331c7316141f:0:becomeActiveMaster-HFileCleaner.large.0-1731032934838,5,FailOnTimeoutGroup] 2024-11-08T02:28:57,147 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-08T02:28:57,148 DEBUG [master/331c7316141f:0:becomeActiveMaster-HFileCleaner.small.0-1731032934838 {}] cleaner.HFileCleaner(306): Exit Thread[master/331c7316141f:0:becomeActiveMaster-HFileCleaner.small.0-1731032934838,5,FailOnTimeoutGroup] 2024-11-08T02:28:57,148 INFO [M:0;331c7316141f:41661 {}] hbase.ChoreService(370): Chore service for: master/331c7316141f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-08T02:28:57,148 INFO [M:0;331c7316141f:41661 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T02:28:57,148 DEBUG [M:0;331c7316141f:41661 {}] master.HMaster(1795): Stopping service threads 2024-11-08T02:28:57,148 INFO [M:0;331c7316141f:41661 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-08T02:28:57,148 INFO [M:0;331c7316141f:41661 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T02:28:57,148 INFO [M:0;331c7316141f:41661 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-08T02:28:57,149 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-08T02:28:57,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-08T02:28:57,158 DEBUG [M:0;331c7316141f:41661 {}] zookeeper.ZKUtil(347): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-08T02:28:57,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T02:28:57,158 WARN [M:0;331c7316141f:41661 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-08T02:28:57,158 INFO [M:0;331c7316141f:41661 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/.lastflushedseqids 2024-11-08T02:28:57,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38033 is added to blk_1073741843_1019 (size=127) 2024-11-08T02:28:57,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741843_1019 (size=127) 2024-11-08T02:28:57,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741843_1019 (size=127) 2024-11-08T02:28:57,169 INFO [M:0;331c7316141f:41661 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-08T02:28:57,170 INFO [M:0;331c7316141f:41661 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-08T02:28:57,170 DEBUG [M:0;331c7316141f:41661 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T02:28:57,170 INFO [M:0;331c7316141f:41661 {}] regionserver.HRegion(1755): Closing region 
master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T02:28:57,170 DEBUG [M:0;331c7316141f:41661 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T02:28:57,170 DEBUG [M:0;331c7316141f:41661 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T02:28:57,170 DEBUG [M:0;331c7316141f:41661 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T02:28:57,170 INFO [M:0;331c7316141f:41661 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-11-08T02:28:57,188 DEBUG [M:0;331c7316141f:41661 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/299047b246194ea49622774e6c579523 is 82, key is hbase:meta,,1/info:regioninfo/1731032935608/Put/seqid=0 2024-11-08T02:28:57,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741844_1020 (size=5672) 2024-11-08T02:28:57,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741844_1020 (size=5672) 2024-11-08T02:28:57,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38033 is added to blk_1073741844_1020 (size=5672) 2024-11-08T02:28:57,196 INFO [M:0;331c7316141f:41661 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/299047b246194ea49622774e6c579523 2024-11-08T02:28:57,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41107-0x101184662a70003, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T02:28:57,215 INFO [RS:2;331c7316141f:41107 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T02:28:57,216 INFO [RS:2;331c7316141f:41107 {}] regionserver.HRegionServer(1031): Exiting; stopping=331c7316141f,41107,1731032934421; zookeeper connection closed. 
2024-11-08T02:28:57,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41107-0x101184662a70003, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T02:28:57,216 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@15f6a580 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@15f6a580 2024-11-08T02:28:57,218 DEBUG [M:0;331c7316141f:41661 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/207ba006d26643fa9768989a27dcf2c4 is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731032936191/Put/seqid=0 2024-11-08T02:28:57,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741845_1021 (size=6438) 2024-11-08T02:28:57,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741845_1021 (size=6438) 2024-11-08T02:28:57,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38033 is added to blk_1073741845_1021 (size=6438) 2024-11-08T02:28:57,225 INFO [M:0;331c7316141f:41661 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.13 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/207ba006d26643fa9768989a27dcf2c4 2024-11-08T02:28:57,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T02:28:57,226 INFO [RS:0;331c7316141f:43669 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T02:28:57,226 INFO [RS:0;331c7316141f:43669 {}] regionserver.HRegionServer(1031): Exiting; stopping=331c7316141f,43669,1731032934337; zookeeper connection closed. 
2024-11-08T02:28:57,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x101184662a70001, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T02:28:57,226 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@55085ed7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@55085ed7 2024-11-08T02:28:57,227 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-08T02:28:57,245 DEBUG [M:0;331c7316141f:41661 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7c16c781166d440da774fd94dfc36c62 is 69, key is 331c7316141f,37539,1731032934380/rs:state/1731032934932/Put/seqid=0 2024-11-08T02:28:57,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741846_1022 (size=5294) 2024-11-08T02:28:57,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741846_1022 (size=5294) 2024-11-08T02:28:57,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38033 is added to blk_1073741846_1022 (size=5294) 2024-11-08T02:28:57,252 INFO [M:0;331c7316141f:41661 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7c16c781166d440da774fd94dfc36c62 2024-11-08T02:28:57,258 DEBUG [M:0;331c7316141f:41661 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/299047b246194ea49622774e6c579523 as hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/299047b246194ea49622774e6c579523 2024-11-08T02:28:57,264 INFO [M:0;331c7316141f:41661 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/299047b246194ea49622774e6c579523, entries=8, sequenceid=72, filesize=5.5 K 2024-11-08T02:28:57,266 DEBUG [M:0;331c7316141f:41661 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/207ba006d26643fa9768989a27dcf2c4 as hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/207ba006d26643fa9768989a27dcf2c4 2024-11-08T02:28:57,272 INFO [M:0;331c7316141f:41661 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/207ba006d26643fa9768989a27dcf2c4, entries=8, sequenceid=72, filesize=6.3 K 2024-11-08T02:28:57,274 DEBUG [M:0;331c7316141f:41661 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7c16c781166d440da774fd94dfc36c62 as hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7c16c781166d440da774fd94dfc36c62 2024-11-08T02:28:57,280 INFO [M:0;331c7316141f:41661 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33393/user/jenkins/test-data/05f577c6-14c2-e662-d397-7678d40538ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7c16c781166d440da774fd94dfc36c62, entries=3, sequenceid=72, filesize=5.2 K 2024-11-08T02:28:57,282 INFO [M:0;331c7316141f:41661 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=72, compaction requested=false 2024-11-08T02:28:57,283 INFO [M:0;331c7316141f:41661 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T02:28:57,283 DEBUG [M:0;331c7316141f:41661 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731032937170Disabling compacts and flushes for region at 1731032937170Disabling writes for close at 1731032937170Obtaining lock to block concurrent updates at 1731032937170Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731032937170Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27459, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1731032937171 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731032937171Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731032937172 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731032937187 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731032937187Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731032937202 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731032937217 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731032937217Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731032937231 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731032937245 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731032937245Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7fa54ffc: reopening flushed file at 1731032937257 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25213f55: reopening flushed file at 1731032937265 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5725166d: reopening flushed file at 1731032937272 (+7 ms)Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=72, compaction requested=false at 1731032937282 (+10 ms)Writing region close event to WAL at 1731032937283 (+1 ms)Closed at 1731032937283 2024-11-08T02:28:57,283 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:57,284 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:57,284 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:57,284 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:57,284 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T02:28:57,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35469 is added to blk_1073741830_1006 (size=32662) 2024-11-08T02:28:57,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34993 is added to blk_1073741830_1006 (size=32662) 2024-11-08T02:28:57,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38033 is added to blk_1073741830_1006 (size=32662) 2024-11-08T02:28:57,287 INFO [M:0;331c7316141f:41661 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-08T02:28:57,287 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-08T02:28:57,287 INFO [M:0;331c7316141f:41661 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41661 2024-11-08T02:28:57,287 INFO [M:0;331c7316141f:41661 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T02:28:57,394 INFO [M:0;331c7316141f:41661 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T02:28:57,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T02:28:57,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41661-0x101184662a70000, quorum=127.0.0.1:56230, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T02:28:57,397 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59c9153e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T02:28:57,397 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@40ee720f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T02:28:57,398 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T02:28:57,398 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39179133{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T02:28:57,398 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3c97cc8b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/hadoop.log.dir/,STOPPED} 2024-11-08T02:28:57,399 WARN [BP-2060157852-172.17.0.2-1731032931463 heartbeating to localhost/127.0.0.1:33393 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T02:28:57,399 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T02:28:57,399 WARN [BP-2060157852-172.17.0.2-1731032931463 heartbeating to localhost/127.0.0.1:33393 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2060157852-172.17.0.2-1731032931463 (Datanode Uuid 1248b51b-90d2-49f9-96cc-e8d5e5161b6f) service to localhost/127.0.0.1:33393
2024-11-08T02:28:57,399 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-08T02:28:57,400 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/cluster_5a95b6b1-a8aa-b3fe-044b-1dafdc18eb2c/data/data5/current/BP-2060157852-172.17.0.2-1731032931463 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-08T02:28:57,400 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/cluster_5a95b6b1-a8aa-b3fe-044b-1dafdc18eb2c/data/data6/current/BP-2060157852-172.17.0.2-1731032931463 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-08T02:28:57,400 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-08T02:28:57,402 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@375e6dfb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-08T02:28:57,403 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@56f13ac4{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-08T02:28:57,403 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-08T02:28:57,403 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7febc9c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-08T02:28:57,403 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72785dee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/hadoop.log.dir/,STOPPED}
2024-11-08T02:28:57,404 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-08T02:28:57,404 WARN [BP-2060157852-172.17.0.2-1731032931463 heartbeating to localhost/127.0.0.1:33393 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-08T02:28:57,404 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-08T02:28:57,404 WARN [BP-2060157852-172.17.0.2-1731032931463 heartbeating to localhost/127.0.0.1:33393 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2060157852-172.17.0.2-1731032931463 (Datanode Uuid 653269ca-a5b9-47cd-9561-62727aa24ffe) service to localhost/127.0.0.1:33393
2024-11-08T02:28:57,405 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/cluster_5a95b6b1-a8aa-b3fe-044b-1dafdc18eb2c/data/data3/current/BP-2060157852-172.17.0.2-1731032931463 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-08T02:28:57,405 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/cluster_5a95b6b1-a8aa-b3fe-044b-1dafdc18eb2c/data/data4/current/BP-2060157852-172.17.0.2-1731032931463 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-08T02:28:57,405 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-08T02:28:57,407 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5cdad191{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-08T02:28:57,407 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75356c63{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-08T02:28:57,407 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-08T02:28:57,408 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6decf963{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-08T02:28:57,408 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68e19264{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/hadoop.log.dir/,STOPPED}
2024-11-08T02:28:57,410 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-08T02:28:57,410 WARN [BP-2060157852-172.17.0.2-1731032931463 heartbeating to localhost/127.0.0.1:33393 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-08T02:28:57,410 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-08T02:28:57,410 WARN [BP-2060157852-172.17.0.2-1731032931463 heartbeating to localhost/127.0.0.1:33393 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2060157852-172.17.0.2-1731032931463 (Datanode Uuid 0a40e784-03e5-488b-bc9c-436afa2a3f34) service to localhost/127.0.0.1:33393
2024-11-08T02:28:57,410 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/cluster_5a95b6b1-a8aa-b3fe-044b-1dafdc18eb2c/data/data1/current/BP-2060157852-172.17.0.2-1731032931463 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-08T02:28:57,410 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/cluster_5a95b6b1-a8aa-b3fe-044b-1dafdc18eb2c/data/data2/current/BP-2060157852-172.17.0.2-1731032931463 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-08T02:28:57,411 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-08T02:28:57,416 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4f0384cb{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-08T02:28:57,416 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7cb6ff0d{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-08T02:28:57,416 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-08T02:28:57,416 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1aa34083{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-08T02:28:57,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4072566{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/306a1faf-4e3e-facf-391d-d91af845fbf2/hadoop.log.dir/,STOPPED}
2024-11-08T02:28:57,423 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-08T02:28:57,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-08T02:28:57,453 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=149 (was 88) - Thread LEAK? -, OpenFileDescriptor=518 (was 439) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=399 (was 399), ProcessCount=11 (was 11), AvailableMemoryMB=7110 (was 7305)