2024-12-05 07:44:36,113 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-05 07:44:36,129 main DEBUG Took 0.013501 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-05 07:44:36,129 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-05 07:44:36,130 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-05 07:44:36,131 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-05 07:44:36,133 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 07:44:36,146 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-05 07:44:36,175 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 07:44:36,176 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 07:44:36,177 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 07:44:36,178 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 07:44:36,179 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 07:44:36,179 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 07:44:36,180 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 07:44:36,181 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 07:44:36,181 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 07:44:36,181 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 07:44:36,183 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 07:44:36,183 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 07:44:36,184 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 07:44:36,184 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-05 07:44:36,185 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 07:44:36,185 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 07:44:36,185 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 07:44:36,186 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 07:44:36,186 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 07:44:36,187 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 07:44:36,187 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 07:44:36,187 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 07:44:36,188 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 07:44:36,188 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 07:44:36,189 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 07:44:36,189 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-05 07:44:36,191 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 07:44:36,193 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-05 07:44:36,195 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-05 07:44:36,196 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-05 07:44:36,197 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-05 07:44:36,197 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-05 07:44:36,207 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-05 07:44:36,210 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-05 07:44:36,211 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-05 07:44:36,212 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-05 07:44:36,212 main DEBUG createAppenders(={Console}) 2024-12-05 07:44:36,213 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-05 07:44:36,213 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-05 07:44:36,214 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-05 07:44:36,214 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-05 07:44:36,215 main DEBUG OutputStream closed 2024-12-05 07:44:36,215 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-05 07:44:36,215 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-05 07:44:36,216 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-05 07:44:36,301 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-05 07:44:36,305 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-05 07:44:36,308 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-05 07:44:36,310 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-05 07:44:36,314 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-05 07:44:36,315 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-05 07:44:36,316 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-05 07:44:36,322 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-05 07:44:36,327 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-05 07:44:36,328 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-05 07:44:36,328 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-05 07:44:36,329 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-05 07:44:36,329 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-05 07:44:36,330 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-05 07:44:36,330 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-05 07:44:36,330 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-05 07:44:36,331 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-05 07:44:36,332 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-05 07:44:36,336 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-05 07:44:36,337 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-05 07:44:36,337 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-05 07:44:36,338 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-05T07:44:36,364 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-05 07:44:36,369 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-05 07:44:36,370 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-05T07:44:36,736 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a 2024-12-05T07:44:36,762 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/cluster_d89fa536-fc25-b305-ed6a-5c67ccfe545d, deleteOnExit=true 2024-12-05T07:44:36,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/test.cache.data in system properties and HBase conf 2024-12-05T07:44:36,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/hadoop.tmp.dir in system properties and HBase conf 2024-12-05T07:44:36,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/hadoop.log.dir in system properties and HBase conf 2024-12-05T07:44:36,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-05T07:44:36,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-05T07:44:36,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-05T07:44:36,876 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-05T07:44:36,986 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-05T07:44:36,991 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-05T07:44:36,991 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-05T07:44:36,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-05T07:44:36,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T07:44:36,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-05T07:44:36,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-05T07:44:36,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T07:44:36,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T07:44:36,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-05T07:44:36,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/nfs.dump.dir in system properties and HBase conf 2024-12-05T07:44:36,996 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/java.io.tmpdir in system properties and HBase conf 2024-12-05T07:44:36,996 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T07:44:36,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-05T07:44:36,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-05T07:44:38,151 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-05T07:44:38,247 INFO [Time-limited test {}] log.Log(170): Logging initialized @2956ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-05T07:44:38,337 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T07:44:38,420 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T07:44:38,451 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T07:44:38,451 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T07:44:38,453 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T07:44:38,473 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T07:44:38,476 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/hadoop.log.dir/,AVAILABLE} 2024-12-05T07:44:38,477 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T07:44:38,693 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/java.io.tmpdir/jetty-localhost-40855-hadoop-hdfs-3_4_1-tests_jar-_-any-18418095355666533745/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T07:44:38,700 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:40855} 2024-12-05T07:44:38,700 INFO [Time-limited test {}] server.Server(415): Started @3410ms 2024-12-05T07:44:39,301 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T07:44:39,309 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T07:44:39,311 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T07:44:39,311 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T07:44:39,311 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T07:44:39,315 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32fec40a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/hadoop.log.dir/,AVAILABLE} 2024-12-05T07:44:39,316 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17f1c7fc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T07:44:39,419 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7e705dc8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/java.io.tmpdir/jetty-localhost-35999-hadoop-hdfs-3_4_1-tests_jar-_-any-17324997821471829108/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T07:44:39,420 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@2ad1569e{HTTP/1.1, (http/1.1)}{localhost:35999} 2024-12-05T07:44:39,420 INFO [Time-limited test {}] server.Server(415): Started @4130ms 2024-12-05T07:44:39,475 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T07:44:39,621 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T07:44:39,627 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T07:44:39,639 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T07:44:39,639 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T07:44:39,639 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T07:44:39,641 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a91ec1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/hadoop.log.dir/,AVAILABLE} 2024-12-05T07:44:39,641 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c2c5be{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T07:44:39,759 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@26b068f7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/java.io.tmpdir/jetty-localhost-39951-hadoop-hdfs-3_4_1-tests_jar-_-any-12621978525186571718/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T07:44:39,760 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5739b847{HTTP/1.1, (http/1.1)}{localhost:39951} 2024-12-05T07:44:39,760 INFO [Time-limited test {}] server.Server(415): Started @4470ms 2024-12-05T07:44:39,762 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T07:44:39,843 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T07:44:39,849 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T07:44:39,851 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T07:44:39,851 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T07:44:39,852 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T07:44:39,853 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e1f796{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/hadoop.log.dir/,AVAILABLE} 2024-12-05T07:44:39,854 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1023f385{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T07:44:39,967 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f750918{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/java.io.tmpdir/jetty-localhost-38575-hadoop-hdfs-3_4_1-tests_jar-_-any-7733313145991225178/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T07:44:39,967 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@86bf2a7{HTTP/1.1, (http/1.1)}{localhost:38575} 2024-12-05T07:44:39,968 INFO [Time-limited test {}] server.Server(415): Started @4678ms 2024-12-05T07:44:39,969 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-05T07:44:40,957 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/cluster_d89fa536-fc25-b305-ed6a-5c67ccfe545d/data/data3/current/BP-1277874143-172.17.0.2-1733384677684/current, will proceed with Du for space computation calculation, 2024-12-05T07:44:40,957 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/cluster_d89fa536-fc25-b305-ed6a-5c67ccfe545d/data/data1/current/BP-1277874143-172.17.0.2-1733384677684/current, will proceed with Du for space computation calculation, 2024-12-05T07:44:40,957 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/cluster_d89fa536-fc25-b305-ed6a-5c67ccfe545d/data/data4/current/BP-1277874143-172.17.0.2-1733384677684/current, will proceed with Du for space computation calculation, 2024-12-05T07:44:40,961 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/cluster_d89fa536-fc25-b305-ed6a-5c67ccfe545d/data/data2/current/BP-1277874143-172.17.0.2-1733384677684/current, will proceed with Du for space computation calculation, 2024-12-05T07:44:41,003 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T07:44:41,003 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T07:44:41,025 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/cluster_d89fa536-fc25-b305-ed6a-5c67ccfe545d/data/data5/current/BP-1277874143-172.17.0.2-1733384677684/current, will proceed with Du for space computation calculation, 2024-12-05T07:44:41,025 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/cluster_d89fa536-fc25-b305-ed6a-5c67ccfe545d/data/data6/current/BP-1277874143-172.17.0.2-1733384677684/current, will proceed with Du for space computation calculation, 2024-12-05T07:44:41,058 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T07:44:41,060 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x58afe8f361fb80fd with lease ID 0x640567ef96f0bafb: Processing first storage report for DS-f0cdd6d9-3ba0-41dc-9ca8-2e3e37bdcc0f from datanode DatanodeRegistration(127.0.0.1:35705, datanodeUuid=c6897825-0977-41e2-8956-53205b2691df, infoPort=40619, infoSecurePort=0, ipcPort=38939, storageInfo=lv=-57;cid=testClusterID;nsid=1461062455;c=1733384677684) 2024-12-05T07:44:41,062 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x58afe8f361fb80fd with lease ID 0x640567ef96f0bafb: from storage DS-f0cdd6d9-3ba0-41dc-9ca8-2e3e37bdcc0f node DatanodeRegistration(127.0.0.1:35705, datanodeUuid=c6897825-0977-41e2-8956-53205b2691df, infoPort=40619, infoSecurePort=0, ipcPort=38939, storageInfo=lv=-57;cid=testClusterID;nsid=1461062455;c=1733384677684), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-05T07:44:41,062 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4e46d8daaf541892 with lease ID 0x640567ef96f0bafc: Processing first storage report for DS-6373260d-e462-46cc-8b7e-5db71139b848 from datanode DatanodeRegistration(127.0.0.1:40617, datanodeUuid=6ece4d20-e2f1-4f27-83d4-007dae01a642, infoPort=43641, infoSecurePort=0, ipcPort=36339, storageInfo=lv=-57;cid=testClusterID;nsid=1461062455;c=1733384677684) 2024-12-05T07:44:41,062 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4e46d8daaf541892 with lease ID 0x640567ef96f0bafc: from storage DS-6373260d-e462-46cc-8b7e-5db71139b848 node DatanodeRegistration(127.0.0.1:40617, datanodeUuid=6ece4d20-e2f1-4f27-83d4-007dae01a642, infoPort=43641, infoSecurePort=0, ipcPort=36339, storageInfo=lv=-57;cid=testClusterID;nsid=1461062455;c=1733384677684), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T07:44:41,062 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x58afe8f361fb80fd with lease ID 0x640567ef96f0bafb: Processing first storage report for DS-57db58d4-7f7b-41c2-8efd-132bea875fae from datanode DatanodeRegistration(127.0.0.1:35705, datanodeUuid=c6897825-0977-41e2-8956-53205b2691df, infoPort=40619, infoSecurePort=0, ipcPort=38939, storageInfo=lv=-57;cid=testClusterID;nsid=1461062455;c=1733384677684) 2024-12-05T07:44:41,062 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x58afe8f361fb80fd with lease ID 0x640567ef96f0bafb: from storage DS-57db58d4-7f7b-41c2-8efd-132bea875fae node DatanodeRegistration(127.0.0.1:35705, datanodeUuid=c6897825-0977-41e2-8956-53205b2691df, infoPort=40619, infoSecurePort=0, ipcPort=38939, storageInfo=lv=-57;cid=testClusterID;nsid=1461062455;c=1733384677684), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-05T07:44:41,063 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4e46d8daaf541892 with lease ID 0x640567ef96f0bafc: Processing first storage report for DS-a546e2a9-e6e9-4b2c-8796-1e39bf8bdff5 from datanode DatanodeRegistration(127.0.0.1:40617, datanodeUuid=6ece4d20-e2f1-4f27-83d4-007dae01a642, infoPort=43641, infoSecurePort=0, ipcPort=36339, storageInfo=lv=-57;cid=testClusterID;nsid=1461062455;c=1733384677684) 2024-12-05T07:44:41,063 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x4e46d8daaf541892 with lease ID 0x640567ef96f0bafc: from storage DS-a546e2a9-e6e9-4b2c-8796-1e39bf8bdff5 node DatanodeRegistration(127.0.0.1:40617, datanodeUuid=6ece4d20-e2f1-4f27-83d4-007dae01a642, infoPort=43641, infoSecurePort=0, ipcPort=36339, storageInfo=lv=-57;cid=testClusterID;nsid=1461062455;c=1733384677684), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T07:44:41,066 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b867e7c75bc5492 with lease ID 0x640567ef96f0bafd: Processing first storage report for DS-12ad55ad-9329-49e8-a122-312d8d600396 from datanode DatanodeRegistration(127.0.0.1:45937, datanodeUuid=a9e8ab7e-d298-4ada-9dfd-e58886528ebb, infoPort=39419, infoSecurePort=0, ipcPort=34337, storageInfo=lv=-57;cid=testClusterID;nsid=1461062455;c=1733384677684) 2024-12-05T07:44:41,067 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b867e7c75bc5492 with lease ID 0x640567ef96f0bafd: from storage DS-12ad55ad-9329-49e8-a122-312d8d600396 node DatanodeRegistration(127.0.0.1:45937, datanodeUuid=a9e8ab7e-d298-4ada-9dfd-e58886528ebb, infoPort=39419, infoSecurePort=0, ipcPort=34337, storageInfo=lv=-57;cid=testClusterID;nsid=1461062455;c=1733384677684), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T07:44:41,067 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b867e7c75bc5492 with lease ID 0x640567ef96f0bafd: Processing first storage report for DS-8be57480-0638-4ac0-89d0-b03ba984a15d from datanode DatanodeRegistration(127.0.0.1:45937, datanodeUuid=a9e8ab7e-d298-4ada-9dfd-e58886528ebb, infoPort=39419, infoSecurePort=0, ipcPort=34337, storageInfo=lv=-57;cid=testClusterID;nsid=1461062455;c=1733384677684) 2024-12-05T07:44:41,067 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b867e7c75bc5492 with lease ID 0x640567ef96f0bafd: from storage DS-8be57480-0638-4ac0-89d0-b03ba984a15d node DatanodeRegistration(127.0.0.1:45937, datanodeUuid=a9e8ab7e-d298-4ada-9dfd-e58886528ebb, infoPort=39419, infoSecurePort=0, ipcPort=34337, storageInfo=lv=-57;cid=testClusterID;nsid=1461062455;c=1733384677684), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T07:44:41,158 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a 2024-12-05T07:44:41,234 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-12-05T07:44:41,294 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=155, OpenFileDescriptor=393, MaxFileDescriptor=1048576, SystemLoadAverage=275, ProcessCount=11, AvailableMemoryMB=7391 2024-12-05T07:44:41,296 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-05T07:44:41,304 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-05T07:44:41,429 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/cluster_d89fa536-fc25-b305-ed6a-5c67ccfe545d/zookeeper_0, clientPort=58368, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/cluster_d89fa536-fc25-b305-ed6a-5c67ccfe545d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/cluster_d89fa536-fc25-b305-ed6a-5c67ccfe545d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-05T07:44:41,441 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58368 2024-12-05T07:44:41,450 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:41,452 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:41,535 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:41,536 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:41,580 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_505696893_22 at /127.0.0.1:41028 [Receiving block BP-1277874143-172.17.0.2-1733384677684:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:40617:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41028 dst: /127.0.0.1:40617 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T07:44:41,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40617 is added to blk_-9223372036854775792_1002 (size=7) 2024-12-05T07:44:42,003 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T07:44:42,010 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85 with version=8 2024-12-05T07:44:42,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/hbase-staging 2024-12-05T07:44:42,133 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-05T07:44:42,426 INFO [Time-limited test {}] client.ConnectionUtils(128): master/fc6cd38557f3:0 server-side Connection retries=45 2024-12-05T07:44:42,438 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:42,438 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:42,445 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T07:44:42,445 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:42,446 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T07:44:42,591 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-05T07:44:42,654 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-05T07:44:42,666 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-05T07:44:42,669 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T07:44:42,695 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 15771 (auto-detected) 2024-12-05T07:44:42,696 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-05T07:44:42,715 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33229 2024-12-05T07:44:42,736 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33229 connecting to ZooKeeper ensemble=127.0.0.1:58368 2024-12-05T07:44:42,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:332290x0, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T07:44:42,858 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33229-0x101a5bcca860000 connected 2024-12-05T07:44:42,943 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:42,946 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:42,955 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T07:44:42,960 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85, hbase.cluster.distributed=false 2024-12-05T07:44:42,985 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T07:44:42,991 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33229 2024-12-05T07:44:42,991 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33229 2024-12-05T07:44:42,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33229 2024-12-05T07:44:42,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33229 2024-12-05T07:44:42,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33229 2024-12-05T07:44:43,106 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fc6cd38557f3:0 server-side Connection retries=45 2024-12-05T07:44:43,108 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:43,108 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:43,108 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T07:44:43,109 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:43,109 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T07:44:43,112 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T07:44:43,114 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T07:44:43,115 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38861 2024-12-05T07:44:43,118 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38861 connecting to ZooKeeper ensemble=127.0.0.1:58368 2024-12-05T07:44:43,119 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:43,122 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:43,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:388610x0, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T07:44:43,142 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:388610x0, quorum=127.0.0.1:58368, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T07:44:43,143 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38861-0x101a5bcca860001 connected 2024-12-05T07:44:43,146 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T07:44:43,154 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T07:44:43,159 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38861-0x101a5bcca860001, quorum=127.0.0.1:58368, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T07:44:43,165 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38861-0x101a5bcca860001, quorum=127.0.0.1:58368, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T07:44:43,166 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38861 2024-12-05T07:44:43,166 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38861 
2024-12-05T07:44:43,167 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38861 2024-12-05T07:44:43,168 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38861 2024-12-05T07:44:43,169 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38861 2024-12-05T07:44:43,185 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fc6cd38557f3:0 server-side Connection retries=45 2024-12-05T07:44:43,185 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:43,186 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:43,186 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T07:44:43,186 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:43,187 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T07:44:43,187 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T07:44:43,187 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T07:44:43,188 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36931 2024-12-05T07:44:43,190 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36931 connecting to ZooKeeper ensemble=127.0.0.1:58368 2024-12-05T07:44:43,192 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:43,196 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:43,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:369310x0, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T07:44:43,234 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:369310x0, quorum=127.0.0.1:58368, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T07:44:43,234 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36931-0x101a5bcca860002 connected 2024-12-05T07:44:43,235 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 
2024-12-05T07:44:43,239 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T07:44:43,241 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36931-0x101a5bcca860002, quorum=127.0.0.1:58368, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T07:44:43,244 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36931-0x101a5bcca860002, quorum=127.0.0.1:58368, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T07:44:43,249 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36931 2024-12-05T07:44:43,250 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36931 2024-12-05T07:44:43,250 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36931 2024-12-05T07:44:43,252 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36931 2024-12-05T07:44:43,253 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36931 2024-12-05T07:44:43,268 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fc6cd38557f3:0 server-side Connection retries=45 2024-12-05T07:44:43,269 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:43,269 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:43,269 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T07:44:43,269 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:43,269 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T07:44:43,269 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T07:44:43,269 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T07:44:43,270 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39335 2024-12-05T07:44:43,272 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39335 connecting to ZooKeeper ensemble=127.0.0.1:58368 2024-12-05T07:44:43,273 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:43,276 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:43,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:393350x0, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T07:44:43,299 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:393350x0, quorum=127.0.0.1:58368, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T07:44:43,299 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39335-0x101a5bcca860003 connected 2024-12-05T07:44:43,300 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T07:44:43,301 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T07:44:43,302 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39335-0x101a5bcca860003, quorum=127.0.0.1:58368, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T07:44:43,303 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39335-0x101a5bcca860003, quorum=127.0.0.1:58368, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T07:44:43,304 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39335 2024-12-05T07:44:43,304 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39335 2024-12-05T07:44:43,304 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39335 2024-12-05T07:44:43,305 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39335 2024-12-05T07:44:43,306 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39335 2024-12-05T07:44:43,321 DEBUG [M:0;fc6cd38557f3:33229 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;fc6cd38557f3:33229 2024-12-05T07:44:43,322 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/fc6cd38557f3,33229,1733384682235 2024-12-05T07:44:43,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36931-0x101a5bcca860002, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T07:44:43,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T07:44:43,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38861-0x101a5bcca860001, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-12-05T07:44:43,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39335-0x101a5bcca860003, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T07:44:43,336 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/fc6cd38557f3,33229,1733384682235 2024-12-05T07:44:43,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36931-0x101a5bcca860002, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T07:44:43,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39335-0x101a5bcca860003, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T07:44:43,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:43,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39335-0x101a5bcca860003, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:43,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36931-0x101a5bcca860002, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:43,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38861-0x101a5bcca860001, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T07:44:43,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38861-0x101a5bcca860001, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:43,367 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T07:44:43,368 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/fc6cd38557f3,33229,1733384682235 from backup master directory 2024-12-05T07:44:43,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36931-0x101a5bcca860002, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T07:44:43,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/fc6cd38557f3,33229,1733384682235 2024-12-05T07:44:43,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38861-0x101a5bcca860001, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-12-05T07:44:43,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39335-0x101a5bcca860003, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T07:44:43,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T07:44:43,375 WARN [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T07:44:43,375 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=fc6cd38557f3,33229,1733384682235 2024-12-05T07:44:43,377 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-05T07:44:43,379 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-05T07:44:43,445 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/hbase.id] with ID: 15d407fa-49b3-4875-94d8-ac1ad642938b 2024-12-05T07:44:43,445 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/.tmp/hbase.id 2024-12-05T07:44:43,457 WARN [master/fc6cd38557f3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:43,458 WARN [master/fc6cd38557f3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:43,466 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_505696893_22 at /127.0.0.1:54942 [Receiving block BP-1277874143-172.17.0.2-1733384677684:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:40617:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54942 dst: /127.0.0.1:40617 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T07:44:43,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40617 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-05T07:44:43,474 WARN [master/fc6cd38557f3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T07:44:43,475 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/.tmp/hbase.id]:[hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/hbase.id] 2024-12-05T07:44:43,524 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:43,528 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-05T07:44:43,548 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-12-05T07:44:43,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38861-0x101a5bcca860001, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:43,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36931-0x101a5bcca860002, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:43,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:43,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39335-0x101a5bcca860003, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:43,596 WARN [master/fc6cd38557f3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:43,596 WARN [master/fc6cd38557f3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:43,600 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_505696893_22 at /127.0.0.1:54954 [Receiving block BP-1277874143-172.17.0.2-1733384677684:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:40617:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54954 dst: /127.0.0.1:40617 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T07:44:43,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40617 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-05T07:44:43,607 WARN [master/fc6cd38557f3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-05T07:44:43,625 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T07:44:43,627 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T07:44:43,632 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T07:44:43,666 WARN [master/fc6cd38557f3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:43,667 WARN [master/fc6cd38557f3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:43,676 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_505696893_22 at /127.0.0.1:54974 [Receiving block BP-1277874143-172.17.0.2-1733384677684:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:40617:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54974 dst: /127.0.0.1:40617 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T07:44:43,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40617 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-05T07:44:43,685 WARN [master/fc6cd38557f3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T07:44:43,705 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/data/master/store 2024-12-05T07:44:43,730 WARN [master/fc6cd38557f3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:43,730 WARN [master/fc6cd38557f3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:43,734 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_505696893_22 at /127.0.0.1:54980 [Receiving block BP-1277874143-172.17.0.2-1733384677684:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40617:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54980 dst: /127.0.0.1:40617 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T07:44:43,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40617 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-05T07:44:43,741 WARN [master/fc6cd38557f3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T07:44:43,745 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-05T07:44:43,747 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T07:44:43,748 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T07:44:43,749 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T07:44:43,749 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T07:44:43,751 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-05T07:44:43,751 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T07:44:43,751 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T07:44:43,753 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733384683748Disabling compacts and flushes for region at 1733384683748Disabling writes for close at 1733384683751 (+3 ms)Writing region close event to WAL at 1733384683751Closed at 1733384683751 2024-12-05T07:44:43,756 WARN [master/fc6cd38557f3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/data/master/store/.initializing 2024-12-05T07:44:43,756 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/WALs/fc6cd38557f3,33229,1733384682235 2024-12-05T07:44:43,764 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T07:44:43,785 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fc6cd38557f3%2C33229%2C1733384682235, suffix=, logDir=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/WALs/fc6cd38557f3,33229,1733384682235, archiveDir=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/oldWALs, maxLogs=10 2024-12-05T07:44:43,830 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/WALs/fc6cd38557f3,33229,1733384682235/fc6cd38557f3%2C33229%2C1733384682235.1733384683792, exclude list is [], retry=0 2024-12-05T07:44:43,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T07:44:43,854 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35705,DS-f0cdd6d9-3ba0-41dc-9ca8-2e3e37bdcc0f,DISK] 2024-12-05T07:44:43,854 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45937,DS-12ad55ad-9329-49e8-a122-312d8d600396,DISK] 2024-12-05T07:44:43,854 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40617,DS-6373260d-e462-46cc-8b7e-5db71139b848,DISK] 2024-12-05T07:44:43,857 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-05T07:44:43,900 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/WALs/fc6cd38557f3,33229,1733384682235/fc6cd38557f3%2C33229%2C1733384682235.1733384683792 2024-12-05T07:44:43,901 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43641:43641),(127.0.0.1/127.0.0.1:39419:39419),(127.0.0.1/127.0.0.1:40619:40619)] 2024-12-05T07:44:43,901 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T07:44:43,902 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T07:44:43,907 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:43,909 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:43,950 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:43,980 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T07:44:43,983 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:43,985 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T07:44:43,986 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:43,989 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T07:44:43,989 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:43,990 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T07:44:43,991 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:43,995 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T07:44:43,995 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:43,997 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T07:44:43,997 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:44,001 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T07:44:44,001 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:44,002 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T07:44:44,003 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:44,006 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:44,007 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:44,012 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:44,013 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:44,018 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T07:44:44,022 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:44,028 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T07:44:44,030 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63018913, jitterRate=-0.060945019125938416}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T07:44:44,037 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733384683922Initializing all the Stores at 1733384683925 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733384683926 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733384683926Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733384683927 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733384683927Cleaning up temporary data from old regions at 1733384684013 (+86 ms)Region opened successfully at 1733384684037 (+24 ms) 2024-12-05T07:44:44,044 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T07:44:44,082 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@489641d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fc6cd38557f3/172.17.0.2:0 2024-12-05T07:44:44,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45937 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-05T07:44:44,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35705 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-05T07:44:44,121 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-05T07:44:44,133 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T07:44:44,133 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T07:44:44,135 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T07:44:44,137 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-05T07:44:44,143 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-05T07:44:44,143 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T07:44:44,167 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-05T07:44:44,175 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-05T07:44:44,224 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T07:44:44,226 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T07:44:44,228 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T07:44:44,232 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T07:44:44,235 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T07:44:44,240 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T07:44:44,249 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T07:44:44,251 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T07:44:44,257 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T07:44:44,276 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T07:44:44,282 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T07:44:44,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36931-0x101a5bcca860002, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T07:44:44,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38861-0x101a5bcca860001, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T07:44:44,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36931-0x101a5bcca860002, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:44,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-05T07:44:44,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39335-0x101a5bcca860003, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T07:44:44,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:44,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39335-0x101a5bcca860003, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:44,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38861-0x101a5bcca860001, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:44,294 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=fc6cd38557f3,33229,1733384682235, sessionid=0x101a5bcca860000, setting cluster-up flag (Was=false) 2024-12-05T07:44:44,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39335-0x101a5bcca860003, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:44,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:44,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36931-0x101a5bcca860002, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:44,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38861-0x101a5bcca860001, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:44,349 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T07:44:44,350 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=fc6cd38557f3,33229,1733384682235 2024-12-05T07:44:44,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36931-0x101a5bcca860002, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:44,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:44,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39335-0x101a5bcca860003, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:44,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:38861-0x101a5bcca860001, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:44,390 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-05T07:44:44,392 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=fc6cd38557f3,33229,1733384682235 2024-12-05T07:44:44,399 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-05T07:44:44,410 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.HRegionServer(746): ClusterId : 15d407fa-49b3-4875-94d8-ac1ad642938b 2024-12-05T07:44:44,410 INFO [RS:2;fc6cd38557f3:39335 {}] regionserver.HRegionServer(746): ClusterId : 15d407fa-49b3-4875-94d8-ac1ad642938b 2024-12-05T07:44:44,410 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.HRegionServer(746): ClusterId : 15d407fa-49b3-4875-94d8-ac1ad642938b 2024-12-05T07:44:44,413 DEBUG [RS:1;fc6cd38557f3:36931 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T07:44:44,413 DEBUG [RS:0;fc6cd38557f3:38861 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T07:44:44,413 DEBUG [RS:2;fc6cd38557f3:39335 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T07:44:44,426 DEBUG [RS:0;fc6cd38557f3:38861 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T07:44:44,426 DEBUG [RS:2;fc6cd38557f3:39335 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T07:44:44,426 DEBUG [RS:1;fc6cd38557f3:36931 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T07:44:44,426 DEBUG [RS:0;fc6cd38557f3:38861 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T07:44:44,426 DEBUG [RS:2;fc6cd38557f3:39335 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T07:44:44,426 DEBUG [RS:1;fc6cd38557f3:36931 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T07:44:44,441 DEBUG [RS:1;fc6cd38557f3:36931 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T07:44:44,441 DEBUG [RS:2;fc6cd38557f3:39335 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T07:44:44,442 DEBUG [RS:1;fc6cd38557f3:36931 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f66f6fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fc6cd38557f3/172.17.0.2:0 2024-12-05T07:44:44,442 DEBUG [RS:2;fc6cd38557f3:39335 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c60f2fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, 
maxRetries=0, fallbackAllowed=true, bind address=fc6cd38557f3/172.17.0.2:0 2024-12-05T07:44:44,443 DEBUG [RS:0;fc6cd38557f3:38861 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T07:44:44,443 DEBUG [RS:0;fc6cd38557f3:38861 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9f525c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fc6cd38557f3/172.17.0.2:0 2024-12-05T07:44:44,460 DEBUG [RS:2;fc6cd38557f3:39335 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;fc6cd38557f3:39335 2024-12-05T07:44:44,463 INFO [RS:2;fc6cd38557f3:39335 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T07:44:44,463 INFO [RS:2;fc6cd38557f3:39335 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T07:44:44,463 DEBUG [RS:2;fc6cd38557f3:39335 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-05T07:44:44,466 INFO [RS:2;fc6cd38557f3:39335 {}] regionserver.HRegionServer(2659): reportForDuty to master=fc6cd38557f3,33229,1733384682235 with port=39335, startcode=1733384683268 2024-12-05T07:44:44,467 DEBUG [RS:0;fc6cd38557f3:38861 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;fc6cd38557f3:38861 2024-12-05T07:44:44,467 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T07:44:44,467 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T07:44:44,467 DEBUG [RS:0;fc6cd38557f3:38861 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-05T07:44:44,468 DEBUG [RS:1;fc6cd38557f3:36931 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;fc6cd38557f3:36931 2024-12-05T07:44:44,468 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T07:44:44,468 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T07:44:44,468 DEBUG [RS:1;fc6cd38557f3:36931 {}] regionserver.HRegionServer(832): About to register with Master. 
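The ZKWatcher and ZKProcedureUtil entries above show the cluster's coordination state living under baseZNode=/hbase, with the master clearing /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached and /hbase/online-snapshot/abort before the region servers report for duty. Purely as an illustration (not part of the test), a minimal sketch of browsing that znode layout with a plain ZooKeeper client, assuming this run's ephemeral quorum at 127.0.0.1:58368 were still reachable:

```java
import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class ListHBaseZNodes {
  public static void main(String[] args) throws Exception {
    // Quorum address taken from the log above; the port is ephemeral to this test run.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58368", 30000, (WatchedEvent event) -> { });
    // baseZNode=/hbase, as reported by ZKWatcher in the log.
    List<String> children = zk.getChildren("/hbase", false);
    System.out.println("children of /hbase: " + children);
    // The snapshot procedure framework keeps its coordination state under
    // /hbase/online-snapshot/{acquired,reached,abort}, which the master clears at startup.
    System.out.println(zk.getChildren("/hbase/online-snapshot", false));
    zk.close();
  }
}
```

In the test itself these znodes are owned by ZKProcedureCoordinator on the master and ZKProcedureMemberRpcs on each region server; the sketch only reads them.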
2024-12-05T07:44:44,469 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.HRegionServer(2659): reportForDuty to master=fc6cd38557f3,33229,1733384682235 with port=38861, startcode=1733384683069 2024-12-05T07:44:44,470 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.HRegionServer(2659): reportForDuty to master=fc6cd38557f3,33229,1733384682235 with port=36931, startcode=1733384683185 2024-12-05T07:44:44,480 DEBUG [RS:1;fc6cd38557f3:36931 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T07:44:44,481 DEBUG [RS:0;fc6cd38557f3:38861 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T07:44:44,481 DEBUG [RS:2;fc6cd38557f3:39335 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T07:44:44,487 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T07:44:44,498 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T07:44:44,507 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
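The balancer lines above record the StochasticLoadBalancer settings in effect for this run (maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000, runMaxSteps=false, plus slop=0.2 from BaseLoadBalancer). A hedged sketch of the configuration keys those values correspond to; the key names are the standard ones from the HBase reference guide, not anything printed in this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Values mirror what the StochasticLoadBalancer reported at startup above.
    conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
    // slop=0.2 in the BaseLoadBalancer line corresponds to this key.
    conf.setFloat("hbase.regions.slop", 0.2f);
    System.out.println("balancer maxSteps = "
        + conf.getLong("hbase.master.balancer.stochastic.maxSteps", -1L));
  }
}
```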
2024-12-05T07:44:44,516 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: fc6cd38557f3,33229,1733384682235 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T07:44:44,523 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33001, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T07:44:44,523 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38591, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T07:44:44,524 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49067, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T07:44:44,527 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/fc6cd38557f3:0, corePoolSize=5, maxPoolSize=5 2024-12-05T07:44:44,527 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/fc6cd38557f3:0, corePoolSize=5, maxPoolSize=5 2024-12-05T07:44:44,527 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/fc6cd38557f3:0, corePoolSize=5, maxPoolSize=5 2024-12-05T07:44:44,527 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/fc6cd38557f3:0, corePoolSize=5, maxPoolSize=5 2024-12-05T07:44:44,528 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/fc6cd38557f3:0, corePoolSize=10, maxPoolSize=10 2024-12-05T07:44:44,528 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,528 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/fc6cd38557f3:0, corePoolSize=2, maxPoolSize=2 2024-12-05T07:44:44,528 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,532 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33229 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T07:44:44,539 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33229 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T07:44:44,541 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33229 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T07:44:44,542 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T07:44:44,543 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-05T07:44:44,550 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:44,551 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733384714551 2024-12-05T07:44:44,551 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T07:44:44,553 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-05T07:44:44,555 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T07:44:44,558 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 
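The FSTableDescriptors entry above dumps the hbase:meta descriptor the master just wrote, including the 'info' family with VERSIONS => '3', BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', IN_MEMORY => 'true' and an 8 KB block size. As an illustration only, the same attributes expressed through the public ColumnFamilyDescriptorBuilder API (this is not how the master builds meta internally, just a translation of the printed attributes):

```java
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeFamily {
  public static void main(String[] args) {
    // Mirrors the 'info' family attributes printed by FSTableDescriptors above.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                 // VERSIONS => '3'
        .setBloomFilterType(BloomType.ROWCOL)              // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setInMemory(true)                                 // IN_MEMORY => 'true'
        .setBlocksize(8192)                                // BLOCKSIZE => '8192 B (8KB)'
        .build();
    System.out.println(info);
  }
}
```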
2024-12-05T07:44:44,559 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T07:44:44,559 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T07:44:44,559 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-05T07:44:44,570 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:44,570 DEBUG [RS:0;fc6cd38557f3:38861 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-05T07:44:44,570 DEBUG [RS:2;fc6cd38557f3:39335 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-05T07:44:44,570 DEBUG [RS:1;fc6cd38557f3:36931 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-05T07:44:44,570 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:44,570 WARN [RS:1;fc6cd38557f3:36931 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-05T07:44:44,570 WARN [RS:2;fc6cd38557f3:39335 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-05T07:44:44,570 WARN [RS:0;fc6cd38557f3:38861 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-05T07:44:44,576 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,582 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-05T07:44:44,582 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_505696893_22 at /127.0.0.1:58776 [Receiving block BP-1277874143-172.17.0.2-1733384677684:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:45937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58776 dst: /127.0.0.1:45937 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T07:44:44,583 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-05T07:44:44,584 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-05T07:44:44,587 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-05T07:44:44,588 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-05T07:44:44,590 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/fc6cd38557f3:0:becomeActiveMaster-HFileCleaner.large.0-1733384684589,5,FailOnTimeoutGroup] 2024-12-05T07:44:44,595 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/fc6cd38557f3:0:becomeActiveMaster-HFileCleaner.small.0-1733384684590,5,FailOnTimeoutGroup] 2024-12-05T07:44:44,595 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,595 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-05T07:44:44,596 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45937 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-05T07:44:44,597 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,600 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
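For context on the WARN/ERROR burst above: RS-3-2-1024k is a Reed-Solomon erasure-coding policy with 3 data plus 2 parity blocks, so each block group needs at least 3 + 2 = 5 datanodes to place every block. This mini-cluster appears to expose only three datanodes, so the parity blocks at indices 3 and 4 cannot be allocated, each striped write logs the "failed to write 2 blocks" warning, and the DataXceiver "Premature EOF" errors are most likely the abandoned parity streams being torn down. The writes still succeed with reduced redundancy, which is why the meta table descriptor lands at its expected path in the entries that follow.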
2024-12-05T07:44:44,601 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-05T07:44:44,602 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85 2024-12-05T07:44:44,614 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:44,614 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:44,617 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_505696893_22 at /127.0.0.1:44420 [Receiving block BP-1277874143-172.17.0.2-1733384677684:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:35705:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44420 dst: /127.0.0.1:35705 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T07:44:44,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35705 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-05T07:44:44,625 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T07:44:44,626 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T07:44:44,629 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T07:44:44,633 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T07:44:44,633 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:44,634 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T07:44:44,634 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T07:44:44,637 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T07:44:44,637 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:44,638 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T07:44:44,638 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T07:44:44,640 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T07:44:44,640 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:44,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T07:44:44,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T07:44:44,644 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T07:44:44,644 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:44,645 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T07:44:44,645 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T07:44:44,647 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740 2024-12-05T07:44:44,648 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740 2024-12-05T07:44:44,651 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T07:44:44,651 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T07:44:44,652 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T07:44:44,657 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T07:44:44,670 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T07:44:44,671 INFO [RS:2;fc6cd38557f3:39335 {}] regionserver.HRegionServer(2659): reportForDuty to master=fc6cd38557f3,33229,1733384682235 with port=39335, startcode=1733384683268 2024-12-05T07:44:44,672 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.HRegionServer(2659): reportForDuty to master=fc6cd38557f3,33229,1733384682235 with port=38861, startcode=1733384683069 2024-12-05T07:44:44,672 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.HRegionServer(2659): reportForDuty to master=fc6cd38557f3,33229,1733384682235 with port=36931, startcode=1733384683185 2024-12-05T07:44:44,672 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61163608, jitterRate=-0.08859121799468994}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T07:44:44,674 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33229 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fc6cd38557f3,38861,1733384683069 2024-12-05T07:44:44,676 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33229 {}] master.ServerManager(517): Registering regionserver=fc6cd38557f3,38861,1733384683069 2024-12-05T07:44:44,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733384684626Initializing all the Stores at 1733384684629 (+3 ms)Instantiating store for column family {NAME => 'info', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733384684629Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733384684629Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733384684629Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733384684629Cleaning up temporary data from old regions at 1733384684651 (+22 ms)Region opened successfully at 1733384684676 (+25 ms) 2024-12-05T07:44:44,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T07:44:44,676 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T07:44:44,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T07:44:44,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T07:44:44,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T07:44:44,677 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T07:44:44,678 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733384684676Disabling compacts and flushes for region at 1733384684676Disabling writes for close at 1733384684676Writing region close event to WAL at 1733384684677 (+1 ms)Closed at 1733384684677 2024-12-05T07:44:44,683 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T07:44:44,683 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-05T07:44:44,685 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33229 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fc6cd38557f3,36931,1733384683185 2024-12-05T07:44:44,685 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33229 {}] master.ServerManager(517): Registering regionserver=fc6cd38557f3,36931,1733384683185 2024-12-05T07:44:44,685 DEBUG [RS:0;fc6cd38557f3:38861 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85 
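Several of the derived numbers in the region-open entries above follow directly from a 128 MB (134217728-byte) memstore flush size and the four meta column families: the FlushLargeStoresPolicy lower bound of 33554432 is 134217728 / 4 families, the "32.0 M" the log mentions; the compaction throttle point of 2684354560 is the usual default of 2 x maxFilesToCompact (10) x 134217728; and the split policy's initialSize=268435456 is 2 x the flush size. The desiredMaxFileSize=61163608 is consistent with a configured maximum region file size of about 64 MB (67108864 bytes) scaled by the logged jitterRate of -0.0886, since 67108864 x (1 - 0.0886) is roughly 61163608; such a small max file size is presumably a test-only setting.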
2024-12-05T07:44:44,685 DEBUG [RS:0;fc6cd38557f3:38861 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40085 2024-12-05T07:44:44,685 DEBUG [RS:0;fc6cd38557f3:38861 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T07:44:44,690 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33229 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fc6cd38557f3,39335,1733384683268 2024-12-05T07:44:44,690 DEBUG [RS:1;fc6cd38557f3:36931 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85 2024-12-05T07:44:44,690 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33229 {}] master.ServerManager(517): Registering regionserver=fc6cd38557f3,39335,1733384683268 2024-12-05T07:44:44,690 DEBUG [RS:1;fc6cd38557f3:36931 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40085 2024-12-05T07:44:44,690 DEBUG [RS:1;fc6cd38557f3:36931 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T07:44:44,691 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-05T07:44:44,693 DEBUG [RS:2;fc6cd38557f3:39335 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85 2024-12-05T07:44:44,693 DEBUG [RS:2;fc6cd38557f3:39335 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40085 2024-12-05T07:44:44,693 DEBUG [RS:2;fc6cd38557f3:39335 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T07:44:44,701 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T07:44:44,705 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-05T07:44:44,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T07:44:44,740 DEBUG [RS:0;fc6cd38557f3:38861 {}] zookeeper.ZKUtil(111): regionserver:38861-0x101a5bcca860001, quorum=127.0.0.1:58368, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fc6cd38557f3,38861,1733384683069 2024-12-05T07:44:44,740 WARN [RS:0;fc6cd38557f3:38861 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
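The ServerManager entries above show the master checking decommissioned status and registering fc6cd38557f3,38861 / 36931 / 39335 as they report for duty. Purely as an illustration, a client-side view of the same cluster once registration completes, using the public Admin API against this run's ephemeral ZooKeeper port:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListLiveServers {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Quorum and client port taken from the log; both are specific to this test run.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 58368);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      System.out.println("active master: " + metrics.getMasterName());
      // Once the three reportForDuty calls above succeed, this map should contain
      // fc6cd38557f3,38861 / 36931 / 39335.
      metrics.getLiveServerMetrics().keySet().forEach(System.out::println);
    }
  }
}
```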
2024-12-05T07:44:44,740 INFO [RS:0;fc6cd38557f3:38861 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T07:44:44,741 DEBUG [RS:0;fc6cd38557f3:38861 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/WALs/fc6cd38557f3,38861,1733384683069 2024-12-05T07:44:44,742 DEBUG [RS:2;fc6cd38557f3:39335 {}] zookeeper.ZKUtil(111): regionserver:39335-0x101a5bcca860003, quorum=127.0.0.1:58368, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fc6cd38557f3,39335,1733384683268 2024-12-05T07:44:44,742 DEBUG [RS:1;fc6cd38557f3:36931 {}] zookeeper.ZKUtil(111): regionserver:36931-0x101a5bcca860002, quorum=127.0.0.1:58368, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fc6cd38557f3,36931,1733384683185 2024-12-05T07:44:44,742 WARN [RS:2;fc6cd38557f3:39335 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T07:44:44,742 WARN [RS:1;fc6cd38557f3:36931 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T07:44:44,742 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fc6cd38557f3,36931,1733384683185] 2024-12-05T07:44:44,742 INFO [RS:2;fc6cd38557f3:39335 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T07:44:44,742 INFO [RS:1;fc6cd38557f3:36931 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T07:44:44,742 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fc6cd38557f3,38861,1733384683069] 2024-12-05T07:44:44,742 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fc6cd38557f3,39335,1733384683268] 2024-12-05T07:44:44,742 DEBUG [RS:2;fc6cd38557f3:39335 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/WALs/fc6cd38557f3,39335,1733384683268 2024-12-05T07:44:44,742 DEBUG [RS:1;fc6cd38557f3:36931 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/WALs/fc6cd38557f3,36931,1733384683185 2024-12-05T07:44:44,767 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T07:44:44,767 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T07:44:44,767 INFO [RS:2;fc6cd38557f3:39335 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T07:44:44,781 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T07:44:44,781 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T07:44:44,781 INFO [RS:2;fc6cd38557f3:39335 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T07:44:44,787 INFO 
[RS:0;fc6cd38557f3:38861 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T07:44:44,787 INFO [RS:1;fc6cd38557f3:36931 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T07:44:44,787 INFO [RS:0;fc6cd38557f3:38861 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,787 INFO [RS:2;fc6cd38557f3:39335 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T07:44:44,787 INFO [RS:1;fc6cd38557f3:36931 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,787 INFO [RS:2;fc6cd38557f3:39335 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,788 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T07:44:44,790 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T07:44:44,790 INFO [RS:2;fc6cd38557f3:39335 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T07:44:44,794 INFO [RS:1;fc6cd38557f3:36931 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T07:44:44,794 INFO [RS:0;fc6cd38557f3:38861 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T07:44:44,795 INFO [RS:2;fc6cd38557f3:39335 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T07:44:44,796 INFO [RS:1;fc6cd38557f3:36931 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,796 INFO [RS:2;fc6cd38557f3:39335 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,796 INFO [RS:0;fc6cd38557f3:38861 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
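The MemStoreFlusher and PressureAwareCompactionThroughputController lines above are driven by a handful of sizing knobs: 836 M is 95% of the 880 M global memstore limit, and 880 M itself is consistent with the default 0.4 heap fraction on a roughly 2.2 GB test heap. A hedged sketch of the corresponding configuration keys, with names taken from the HBase reference guide rather than from this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreAndThroughputKnobs {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the region-server heap reserved for memstores; 0.4 of a ~2.2 GB heap
    // gives the 880 M global limit reported by MemStoreFlusher above.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Low-water mark as a fraction of that limit: 880 M * 0.95 = 836 M.
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // Compaction throughput bounds reported above, in bytes per second
    // (key names per the reference guide; worth double-checking for your version).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    System.out.println(conf.getFloat("hbase.regionserver.global.memstore.size", -1f));
  }
}
```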
2024-12-05T07:44:44,796 DEBUG [RS:0;fc6cd38557f3:38861 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,796 DEBUG [RS:1;fc6cd38557f3:36931 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,797 DEBUG [RS:0;fc6cd38557f3:38861 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,797 DEBUG [RS:2;fc6cd38557f3:39335 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,797 DEBUG [RS:0;fc6cd38557f3:38861 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,797 DEBUG [RS:2;fc6cd38557f3:39335 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,797 DEBUG [RS:0;fc6cd38557f3:38861 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,797 DEBUG [RS:2;fc6cd38557f3:39335 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,797 DEBUG [RS:2;fc6cd38557f3:39335 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,797 DEBUG [RS:0;fc6cd38557f3:38861 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,797 DEBUG [RS:2;fc6cd38557f3:39335 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,797 DEBUG [RS:0;fc6cd38557f3:38861 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fc6cd38557f3:0, corePoolSize=2, maxPoolSize=2 2024-12-05T07:44:44,797 DEBUG [RS:2;fc6cd38557f3:39335 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fc6cd38557f3:0, corePoolSize=2, maxPoolSize=2 2024-12-05T07:44:44,797 DEBUG [RS:2;fc6cd38557f3:39335 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,797 DEBUG [RS:0;fc6cd38557f3:38861 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,797 DEBUG [RS:2;fc6cd38557f3:39335 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,797 DEBUG [RS:0;fc6cd38557f3:38861 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,797 DEBUG [RS:1;fc6cd38557f3:36931 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fc6cd38557f3:0, corePoolSize=1, 
maxPoolSize=1 2024-12-05T07:44:44,798 DEBUG [RS:2;fc6cd38557f3:39335 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,798 DEBUG [RS:0;fc6cd38557f3:38861 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,798 DEBUG [RS:1;fc6cd38557f3:36931 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,798 DEBUG [RS:0;fc6cd38557f3:38861 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,798 DEBUG [RS:2;fc6cd38557f3:39335 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,798 DEBUG [RS:1;fc6cd38557f3:36931 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,798 DEBUG [RS:0;fc6cd38557f3:38861 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,798 DEBUG [RS:1;fc6cd38557f3:36931 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,798 DEBUG [RS:2;fc6cd38557f3:39335 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,798 DEBUG [RS:2;fc6cd38557f3:39335 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,798 DEBUG [RS:0;fc6cd38557f3:38861 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,798 DEBUG [RS:1;fc6cd38557f3:36931 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fc6cd38557f3:0, corePoolSize=2, maxPoolSize=2 2024-12-05T07:44:44,798 DEBUG [RS:2;fc6cd38557f3:39335 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fc6cd38557f3:0, corePoolSize=3, maxPoolSize=3 2024-12-05T07:44:44,798 DEBUG [RS:0;fc6cd38557f3:38861 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fc6cd38557f3:0, corePoolSize=3, maxPoolSize=3 2024-12-05T07:44:44,798 DEBUG [RS:1;fc6cd38557f3:36931 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,798 DEBUG [RS:2;fc6cd38557f3:39335 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0, corePoolSize=3, maxPoolSize=3 2024-12-05T07:44:44,798 DEBUG [RS:1;fc6cd38557f3:36931 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,798 DEBUG [RS:0;fc6cd38557f3:38861 {}] executor.ExecutorService(95): Starting executor 
service name=RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0, corePoolSize=3, maxPoolSize=3 2024-12-05T07:44:44,798 DEBUG [RS:1;fc6cd38557f3:36931 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,799 DEBUG [RS:1;fc6cd38557f3:36931 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,799 DEBUG [RS:1;fc6cd38557f3:36931 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,799 DEBUG [RS:1;fc6cd38557f3:36931 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:44,799 DEBUG [RS:1;fc6cd38557f3:36931 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fc6cd38557f3:0, corePoolSize=3, maxPoolSize=3 2024-12-05T07:44:44,799 DEBUG [RS:1;fc6cd38557f3:36931 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0, corePoolSize=3, maxPoolSize=3 2024-12-05T07:44:44,800 INFO [RS:2;fc6cd38557f3:39335 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,800 INFO [RS:2;fc6cd38557f3:39335 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,800 INFO [RS:2;fc6cd38557f3:39335 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,801 INFO [RS:2;fc6cd38557f3:39335 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,801 INFO [RS:2;fc6cd38557f3:39335 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,801 INFO [RS:2;fc6cd38557f3:39335 {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,39335,1733384683268-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T07:44:44,801 INFO [RS:0;fc6cd38557f3:38861 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,801 INFO [RS:0;fc6cd38557f3:38861 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,801 INFO [RS:0;fc6cd38557f3:38861 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,801 INFO [RS:0;fc6cd38557f3:38861 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,801 INFO [RS:0;fc6cd38557f3:38861 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,801 INFO [RS:1;fc6cd38557f3:36931 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-05T07:44:44,801 INFO [RS:0;fc6cd38557f3:38861 {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,38861,1733384683069-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T07:44:44,802 INFO [RS:1;fc6cd38557f3:36931 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,802 INFO [RS:1;fc6cd38557f3:36931 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,802 INFO [RS:1;fc6cd38557f3:36931 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,802 INFO [RS:1;fc6cd38557f3:36931 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,802 INFO [RS:1;fc6cd38557f3:36931 {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,36931,1733384683185-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T07:44:44,818 INFO [RS:2;fc6cd38557f3:39335 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T07:44:44,818 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T07:44:44,820 INFO [RS:2;fc6cd38557f3:39335 {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,39335,1733384683268-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,820 INFO [RS:1;fc6cd38557f3:36931 {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,36931,1733384683185-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,821 INFO [RS:1;fc6cd38557f3:36931 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,821 INFO [RS:2;fc6cd38557f3:39335 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,821 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.Replication(171): fc6cd38557f3,36931,1733384683185 started 2024-12-05T07:44:44,821 INFO [RS:2;fc6cd38557f3:39335 {}] regionserver.Replication(171): fc6cd38557f3,39335,1733384683268 started 2024-12-05T07:44:44,824 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T07:44:44,824 INFO [RS:0;fc6cd38557f3:38861 {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,38861,1733384683069-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,824 INFO [RS:0;fc6cd38557f3:38861 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,824 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.Replication(171): fc6cd38557f3,38861,1733384683069 started 2024-12-05T07:44:44,845 INFO [RS:2;fc6cd38557f3:39335 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,845 INFO [RS:1;fc6cd38557f3:36931 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
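The ScheduledChore entries above are periodic background tasks that each region server registers with its ChoreService; the name, period and unit printed in the log come straight from the chore's constructor arguments. A minimal sketch, assuming the public shape of org.apache.hadoop.hbase.ScheduledChore and ChoreService (internal classes whose signatures can shift between releases); the chore name and body here are hypothetical:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreExample {
  public static void main(String[] args) throws InterruptedException {
    ChoreService choreService = new ChoreService("example");   // thread pool that runs the chores

    Stoppable stopper = new Stoppable() {                      // a chore stops once its stopper is stopped
      private volatile boolean stopped = false;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // Same shape as the "name=CompactionChecker, period=1000, unit=MILLISECONDS" entries above,
    // but with a placeholder task body.
    ScheduledChore chore = new ScheduledChore("ExampleChecker", stopper, 1000, 0, TimeUnit.MILLISECONDS) {
      @Override
      protected void chore() {
        System.out.println("periodic work runs here");
      }
    };

    choreService.scheduleChore(chore);   // logged by ChoreService as "Chore ... is enabled."
    Thread.sleep(3000);
    choreService.shutdown();
  }
}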
2024-12-05T07:44:44,846 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.HRegionServer(1482): Serving as fc6cd38557f3,36931,1733384683185, RpcServer on fc6cd38557f3/172.17.0.2:36931, sessionid=0x101a5bcca860002 2024-12-05T07:44:44,846 INFO [RS:2;fc6cd38557f3:39335 {}] regionserver.HRegionServer(1482): Serving as fc6cd38557f3,39335,1733384683268, RpcServer on fc6cd38557f3/172.17.0.2:39335, sessionid=0x101a5bcca860003 2024-12-05T07:44:44,847 DEBUG [RS:2;fc6cd38557f3:39335 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T07:44:44,847 DEBUG [RS:1;fc6cd38557f3:36931 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T07:44:44,847 INFO [RS:0;fc6cd38557f3:38861 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:44,847 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.HRegionServer(1482): Serving as fc6cd38557f3,38861,1733384683069, RpcServer on fc6cd38557f3/172.17.0.2:38861, sessionid=0x101a5bcca860001 2024-12-05T07:44:44,847 DEBUG [RS:1;fc6cd38557f3:36931 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fc6cd38557f3,36931,1733384683185 2024-12-05T07:44:44,847 DEBUG [RS:2;fc6cd38557f3:39335 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fc6cd38557f3,39335,1733384683268 2024-12-05T07:44:44,847 DEBUG [RS:0;fc6cd38557f3:38861 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T07:44:44,847 DEBUG [RS:2;fc6cd38557f3:39335 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fc6cd38557f3,39335,1733384683268' 2024-12-05T07:44:44,847 DEBUG [RS:1;fc6cd38557f3:36931 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fc6cd38557f3,36931,1733384683185' 2024-12-05T07:44:44,847 DEBUG [RS:0;fc6cd38557f3:38861 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fc6cd38557f3,38861,1733384683069 2024-12-05T07:44:44,847 DEBUG [RS:0;fc6cd38557f3:38861 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fc6cd38557f3,38861,1733384683069' 2024-12-05T07:44:44,847 DEBUG [RS:0;fc6cd38557f3:38861 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T07:44:44,847 DEBUG [RS:1;fc6cd38557f3:36931 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T07:44:44,847 DEBUG [RS:2;fc6cd38557f3:39335 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T07:44:44,848 DEBUG [RS:1;fc6cd38557f3:36931 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T07:44:44,848 DEBUG [RS:2;fc6cd38557f3:39335 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T07:44:44,848 DEBUG [RS:0;fc6cd38557f3:38861 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T07:44:44,849 DEBUG [RS:2;fc6cd38557f3:39335 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T07:44:44,849 DEBUG [RS:1;fc6cd38557f3:36931 {}] procedure.RegionServerProcedureManagerHost(53): Procedure 
flush-table-proc started 2024-12-05T07:44:44,849 DEBUG [RS:2;fc6cd38557f3:39335 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T07:44:44,849 DEBUG [RS:1;fc6cd38557f3:36931 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T07:44:44,849 DEBUG [RS:2;fc6cd38557f3:39335 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fc6cd38557f3,39335,1733384683268 2024-12-05T07:44:44,849 DEBUG [RS:1;fc6cd38557f3:36931 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fc6cd38557f3,36931,1733384683185 2024-12-05T07:44:44,849 DEBUG [RS:2;fc6cd38557f3:39335 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fc6cd38557f3,39335,1733384683268' 2024-12-05T07:44:44,849 DEBUG [RS:1;fc6cd38557f3:36931 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fc6cd38557f3,36931,1733384683185' 2024-12-05T07:44:44,849 DEBUG [RS:2;fc6cd38557f3:39335 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T07:44:44,849 DEBUG [RS:0;fc6cd38557f3:38861 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T07:44:44,849 DEBUG [RS:1;fc6cd38557f3:36931 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T07:44:44,849 DEBUG [RS:0;fc6cd38557f3:38861 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T07:44:44,849 DEBUG [RS:0;fc6cd38557f3:38861 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fc6cd38557f3,38861,1733384683069 2024-12-05T07:44:44,850 DEBUG [RS:0;fc6cd38557f3:38861 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fc6cd38557f3,38861,1733384683069' 2024-12-05T07:44:44,850 DEBUG [RS:0;fc6cd38557f3:38861 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T07:44:44,851 DEBUG [RS:1;fc6cd38557f3:36931 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T07:44:44,851 DEBUG [RS:0;fc6cd38557f3:38861 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T07:44:44,851 DEBUG [RS:2;fc6cd38557f3:39335 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T07:44:44,851 DEBUG [RS:1;fc6cd38557f3:36931 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T07:44:44,851 INFO [RS:1;fc6cd38557f3:36931 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T07:44:44,851 DEBUG [RS:0;fc6cd38557f3:38861 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T07:44:44,851 INFO [RS:1;fc6cd38557f3:36931 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T07:44:44,851 INFO [RS:0;fc6cd38557f3:38861 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T07:44:44,852 INFO [RS:0;fc6cd38557f3:38861 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
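The flush-table-proc and online-snapshot members registered above (under /hbase/flush-table-proc and /hbase/online-snapshot) are the region-server half of the ZooKeeper-coordinated procedures behind table-wide flushes and online snapshots. A client-side sketch using standard Admin calls; whether each of these entry points actually routes through those ZK members can differ by version, and the snapshot name is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestHBaseWalOnEC");      // table created later in this log
      admin.flush(table);                                           // table-wide flush request
      admin.snapshot("TestHBaseWalOnEC-snap", table);               // flush-type snapshot, coordinated via "online-snapshot"
    }
  }
}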
2024-12-05T07:44:44,852 DEBUG [RS:2;fc6cd38557f3:39335 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T07:44:44,852 INFO [RS:2;fc6cd38557f3:39335 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T07:44:44,852 INFO [RS:2;fc6cd38557f3:39335 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T07:44:44,856 WARN [fc6cd38557f3:33229 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-05T07:44:44,959 INFO [RS:2;fc6cd38557f3:39335 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T07:44:44,959 INFO [RS:0;fc6cd38557f3:38861 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T07:44:44,960 INFO [RS:1;fc6cd38557f3:36931 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T07:44:44,964 INFO [RS:1;fc6cd38557f3:36931 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fc6cd38557f3%2C36931%2C1733384683185, suffix=, logDir=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/WALs/fc6cd38557f3,36931,1733384683185, archiveDir=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/oldWALs, maxLogs=32 2024-12-05T07:44:44,965 INFO [RS:2;fc6cd38557f3:39335 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fc6cd38557f3%2C39335%2C1733384683268, suffix=, logDir=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/WALs/fc6cd38557f3,39335,1733384683268, archiveDir=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/oldWALs, maxLogs=32 2024-12-05T07:44:44,971 INFO [RS:0;fc6cd38557f3:38861 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fc6cd38557f3%2C38861%2C1733384683069, suffix=, logDir=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/WALs/fc6cd38557f3,38861,1733384683069, archiveDir=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/oldWALs, maxLogs=32 2024-12-05T07:44:44,985 DEBUG [RS:2;fc6cd38557f3:39335 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/WALs/fc6cd38557f3,39335,1733384683268/fc6cd38557f3%2C39335%2C1733384683268.1733384684971, exclude list is [], retry=0 2024-12-05T07:44:44,989 DEBUG [RS:1;fc6cd38557f3:36931 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/WALs/fc6cd38557f3,36931,1733384683185/fc6cd38557f3%2C36931%2C1733384683185.1733384684971, exclude list is [], retry=0 2024-12-05T07:44:44,993 DEBUG [RS:0;fc6cd38557f3:38861 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/WALs/fc6cd38557f3,38861,1733384683069/fc6cd38557f3%2C38861%2C1733384683069.1733384684973, exclude list is [], retry=0 2024-12-05T07:44:44,993 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40617,DS-6373260d-e462-46cc-8b7e-5db71139b848,DISK] 
2024-12-05T07:44:44,994 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45937,DS-12ad55ad-9329-49e8-a122-312d8d600396,DISK] 2024-12-05T07:44:44,996 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35705,DS-f0cdd6d9-3ba0-41dc-9ca8-2e3e37bdcc0f,DISK] 2024-12-05T07:44:44,997 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45937,DS-12ad55ad-9329-49e8-a122-312d8d600396,DISK] 2024-12-05T07:44:45,000 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35705,DS-f0cdd6d9-3ba0-41dc-9ca8-2e3e37bdcc0f,DISK] 2024-12-05T07:44:45,001 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40617,DS-6373260d-e462-46cc-8b7e-5db71139b848,DISK] 2024-12-05T07:44:45,036 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35705,DS-f0cdd6d9-3ba0-41dc-9ca8-2e3e37bdcc0f,DISK] 2024-12-05T07:44:45,036 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40617,DS-6373260d-e462-46cc-8b7e-5db71139b848,DISK] 2024-12-05T07:44:45,039 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45937,DS-12ad55ad-9329-49e8-a122-312d8d600396,DISK] 2024-12-05T07:44:45,047 INFO [RS:2;fc6cd38557f3:39335 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/WALs/fc6cd38557f3,39335,1733384683268/fc6cd38557f3%2C39335%2C1733384683268.1733384684971 2024-12-05T07:44:45,050 INFO [RS:1;fc6cd38557f3:36931 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/WALs/fc6cd38557f3,36931,1733384683185/fc6cd38557f3%2C36931%2C1733384683185.1733384684971 2024-12-05T07:44:45,051 DEBUG [RS:2;fc6cd38557f3:39335 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39419:39419),(127.0.0.1/127.0.0.1:40619:40619),(127.0.0.1/127.0.0.1:43641:43641)] 2024-12-05T07:44:45,053 INFO [RS:0;fc6cd38557f3:38861 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/WALs/fc6cd38557f3,38861,1733384683069/fc6cd38557f3%2C38861%2C1733384683069.1733384684973 
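The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" entries above, and the AsyncFSWAL writer pipelines that follow, are driven by a handful of region-server settings. A hedged sketch of the configuration keys conventionally behind those numbers; the key names are standard HBase settings, but the mapping to the exact values printed here is an assumption, and the values simply mirror the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // "asyncfs" selects AsyncFSWALProvider, the fan-out async writer used above.
    conf.set("hbase.wal.provider", "asyncfs");

    // blocksize=256 MB; rollsize is blocksize * roll multiplier (0.5 -> 128 MB).
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);

    // maxLogs=32: how many WAL files may accumulate before flushes are forced.
    conf.setInt("hbase.regionserver.maxlogs", 32);

    System.out.println("WAL provider: " + conf.get("hbase.wal.provider"));
  }
}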
2024-12-05T07:44:45,055 DEBUG [RS:1;fc6cd38557f3:36931 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40619:40619),(127.0.0.1/127.0.0.1:43641:43641),(127.0.0.1/127.0.0.1:39419:39419)] 2024-12-05T07:44:45,055 DEBUG [RS:0;fc6cd38557f3:38861 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40619:40619),(127.0.0.1/127.0.0.1:39419:39419),(127.0.0.1/127.0.0.1:43641:43641)] 2024-12-05T07:44:45,110 DEBUG [fc6cd38557f3:33229 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-05T07:44:45,121 DEBUG [fc6cd38557f3:33229 {}] balancer.BalancerClusterState(204): Hosts are {fc6cd38557f3=0} racks are {/default-rack=0} 2024-12-05T07:44:45,138 DEBUG [fc6cd38557f3:33229 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T07:44:45,138 DEBUG [fc6cd38557f3:33229 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T07:44:45,139 DEBUG [fc6cd38557f3:33229 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T07:44:45,139 DEBUG [fc6cd38557f3:33229 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T07:44:45,139 DEBUG [fc6cd38557f3:33229 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T07:44:45,139 DEBUG [fc6cd38557f3:33229 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T07:44:45,139 INFO [fc6cd38557f3:33229 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T07:44:45,139 INFO [fc6cd38557f3:33229 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T07:44:45,139 INFO [fc6cd38557f3:33229 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T07:44:45,139 DEBUG [fc6cd38557f3:33229 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T07:44:45,148 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=fc6cd38557f3,36931,1733384683185 2024-12-05T07:44:45,155 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as fc6cd38557f3,36931,1733384683185, state=OPENING 2024-12-05T07:44:45,223 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-05T07:44:45,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39335-0x101a5bcca860003, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:45,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36931-0x101a5bcca860002, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:45,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38861-0x101a5bcca860001, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:45,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:45,233 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode 
for path /hbase/meta-region-server: CHANGED 2024-12-05T07:44:45,233 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T07:44:45,233 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T07:44:45,233 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T07:44:45,235 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T07:44:45,237 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=fc6cd38557f3,36931,1733384683185}] 2024-12-05T07:44:45,434 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T07:44:45,437 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33573, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T07:44:45,462 INFO [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-05T07:44:45,463 INFO [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T07:44:45,463 INFO [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-05T07:44:45,468 INFO [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fc6cd38557f3%2C36931%2C1733384683185.meta, suffix=.meta, logDir=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/WALs/fc6cd38557f3,36931,1733384683185, archiveDir=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/oldWALs, maxLogs=32 2024-12-05T07:44:45,485 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/WALs/fc6cd38557f3,36931,1733384683185/fc6cd38557f3%2C36931%2C1733384683185.meta.1733384685470.meta, exclude list is [], retry=0 2024-12-05T07:44:45,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35705,DS-f0cdd6d9-3ba0-41dc-9ca8-2e3e37bdcc0f,DISK] 2024-12-05T07:44:45,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40617,DS-6373260d-e462-46cc-8b7e-5db71139b848,DISK] 
2024-12-05T07:44:45,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45937,DS-12ad55ad-9329-49e8-a122-312d8d600396,DISK] 2024-12-05T07:44:45,498 INFO [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/WALs/fc6cd38557f3,36931,1733384683185/fc6cd38557f3%2C36931%2C1733384683185.meta.1733384685470.meta 2024-12-05T07:44:45,500 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43641:43641),(127.0.0.1/127.0.0.1:40619:40619),(127.0.0.1/127.0.0.1:39419:39419)] 2024-12-05T07:44:45,500 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T07:44:45,503 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T07:44:45,506 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T07:44:45,510 INFO [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-05T07:44:45,514 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T07:44:45,514 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T07:44:45,515 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T07:44:45,515 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T07:44:45,518 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T07:44:45,520 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T07:44:45,521 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:45,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T07:44:45,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T07:44:45,524 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T07:44:45,524 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:45,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T07:44:45,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T07:44:45,526 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T07:44:45,526 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:45,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T07:44:45,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T07:44:45,529 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T07:44:45,529 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:45,530 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
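Each compactions.CompactionConfiguration(183) entry above dumps the effective compaction tuning for one column family of hbase:meta (info, ns, rep_barrier, table). A hedged mapping of a few of the printed numbers back to their usual configuration keys; the key names are standard, but treat the exact correspondence as an assumption, and the values below simply restate what the log printed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // files [minFilesToCompact:3, maxFilesToCompact:10)
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);

    // ratio 1.200000; off-peak ratio 5.000000
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);

    // minCompactSize:128 MB -- store files below this size always qualify for minor compaction
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);

    // major period 604800000 ms (7 days), major jitter 0.5
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
  }
}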
2024-12-05T07:44:45,530 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T07:44:45,531 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740 2024-12-05T07:44:45,537 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740 2024-12-05T07:44:45,540 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T07:44:45,540 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T07:44:45,541 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T07:44:45,543 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T07:44:45,545 INFO [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70411918, jitterRate=0.049219340085983276}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T07:44:45,545 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-05T07:44:45,546 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733384685515Writing region info on filesystem at 1733384685515Initializing all the Stores at 1733384685518 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733384685518Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733384685518Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733384685518Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733384685518Cleaning up temporary data from old regions at 1733384685540 (+22 ms)Running coprocessor post-open hooks at 1733384685545 (+5 ms)Region opened successfully at 1733384685546 (+1 ms) 2024-12-05T07:44:45,554 INFO [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733384685405 2024-12-05T07:44:45,565 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-05T07:44:45,565 INFO [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-05T07:44:45,567 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=fc6cd38557f3,36931,1733384683185 2024-12-05T07:44:45,574 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as fc6cd38557f3,36931,1733384683185, state=OPEN 2024-12-05T07:44:45,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39335-0x101a5bcca860003, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T07:44:45,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38861-0x101a5bcca860001, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T07:44:45,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36931-0x101a5bcca860002, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T07:44:45,640 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T07:44:45,640 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T07:44:45,640 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T07:44:45,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T07:44:45,640 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T07:44:45,641 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=fc6cd38557f3,36931,1733384683185 2024-12-05T07:44:45,646 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-05T07:44:45,647 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=fc6cd38557f3,36931,1733384683185 in 404 msec 2024-12-05T07:44:45,656 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-05T07:44:45,656 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 957 msec 2024-12-05T07:44:45,657 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T07:44:45,657 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-05T07:44:45,676 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T07:44:45,677 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fc6cd38557f3,36931,1733384683185, seqNum=-1] 2024-12-05T07:44:45,696 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T07:44:45,698 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60911, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T07:44:45,724 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2860 sec 2024-12-05T07:44:45,724 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733384685724, completionTime=-1 2024-12-05T07:44:45,727 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-05T07:44:45,728 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
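The "fetched meta region location" entries above show the connection registry resolving hbase:meta to fc6cd38557f3,36931, the server that just opened it. Application code normally reaches the same lookup indirectly through a RegionLocator; a minimal sketch, with a placeholder row key and the table that this test creates further down:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateRegionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestHBaseWalOnEC"))) {
      // Resolving a row forces a meta lookup much like the one logged above.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row-1"), true);
      System.out.println("region " + loc.getRegion().getEncodedName() + " is on " + loc.getServerName());
    }
  }
}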
2024-12-05T07:44:45,765 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-05T07:44:45,765 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733384745765 2024-12-05T07:44:45,765 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733384805765 2024-12-05T07:44:45,766 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 37 msec 2024-12-05T07:44:45,768 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-05T07:44:45,777 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,33229,1733384682235-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:45,777 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,33229,1733384682235-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:45,777 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,33229,1733384682235-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:45,780 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-fc6cd38557f3:33229, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:45,780 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:45,781 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:45,788 DEBUG [master/fc6cd38557f3:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T07:44:45,810 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.435sec 2024-12-05T07:44:45,812 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T07:44:45,813 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-05T07:44:45,813 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T07:44:45,814 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-05T07:44:45,814 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T07:44:45,814 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,33229,1733384682235-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T07:44:45,815 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,33229,1733384682235-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T07:44:45,820 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T07:44:45,821 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T07:44:45,822 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,33229,1733384682235-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:45,841 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27b908bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T07:44:45,847 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-05T07:44:45,848 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-05T07:44:45,854 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request fc6cd38557f3,33229,-1 for getting cluster id 2024-12-05T07:44:45,856 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T07:44:45,867 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '15d407fa-49b3-4875-94d8-ac1ad642938b' 2024-12-05T07:44:45,870 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T07:44:45,871 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "15d407fa-49b3-4875-94d8-ac1ad642938b" 2024-12-05T07:44:45,871 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@656aeb8b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T07:44:45,871 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fc6cd38557f3,33229,-1] 2024-12-05T07:44:45,875 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T07:44:45,878 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T07:44:45,879 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42342, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-12-05T07:44:45,884 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@624c47c2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T07:44:45,885 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T07:44:45,895 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fc6cd38557f3,36931,1733384683185, seqNum=-1] 2024-12-05T07:44:45,897 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T07:44:45,901 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54114, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T07:44:45,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=fc6cd38557f3,33229,1733384682235 2024-12-05T07:44:45,933 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-05T07:44:45,938 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is fc6cd38557f3,33229,1733384682235 2024-12-05T07:44:45,940 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@45b7b835 2024-12-05T07:44:45,941 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-05T07:44:45,943 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42348, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-05T07:44:45,949 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T07:44:45,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-05T07:44:45,960 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T07:44:45,962 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-05T07:44:45,962 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
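The request logged by master.HMaster$4(2454) above ("create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', ...}") is what an Admin#createTable call against the freshly started mini cluster produces. A hedged sketch of the client side as the test presumably drives it; the HBaseTestingUtil class name follows the "hbase.HBaseTestingUtil(877): Minicluster is up" entry above, but the exact test wiring shown here is an assumption:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableExample {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster(3);                        // three region servers, as in this log
    try (Admin admin = util.getConnection().getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
          .setRegionReplication(1)                   // TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))       // NAME => 'cf'
              .setMaxVersions(1)                     // VERSIONS => '1'
              .build())
          .build();
      admin.createTable(desc);                       // drives the CreateTableProcedure (pid=4) below
    } finally {
      util.shutdownMiniCluster();
    }
  }
}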
2024-12-05T07:44:45,965 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T07:44:45,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T07:44:45,997 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:45,997 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:46,002 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_505696893_22 at /127.0.0.1:55054 [Receiving block BP-1277874143-172.17.0.2-1733384677684:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:40617:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55054 dst: /127.0.0.1:40617 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T07:44:46,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40617 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-05T07:44:46,017 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-05T07:44:46,020 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 2a2aca0d464f6f2e1621effde811c7fd, NAME => 'TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85 2024-12-05T07:44:46,033 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:46,033 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:46,044 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_505696893_22 at /127.0.0.1:55076 [Receiving block BP-1277874143-172.17.0.2-1733384677684:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:40617:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55076 dst: /127.0.0.1:40617 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T07:44:46,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40617 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-05T07:44:46,051 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
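The repeated "Cannot allocate parity block(index=3/4, policy=RS-3-2-1024k)" warnings above are expected on this topology: RS-3-2-1024k stripes each block group into 3 data blocks plus 2 parity blocks, so placement wants at least 5 datanodes, while this mini DFS cluster has only 3 (127.0.0.1:40617, :45937, :35705). Two parity blocks per group therefore cannot be placed, which is also why HDFS follows up with "Block group <1> failed to write 2 blocks." The files being written here are tiny table metadata files (size=392 and size=51), which suggests the test's HBase data directory sits under the EC policy. A hedged sketch of how a directory is put under that policy with the standard Hadoop 3 API (the path below is hypothetical; the CLI counterpart is the 'hdfs ec' tool the warning itself points to):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class ErasureCodingPolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // RS-3-2-1024k = 3 data + 2 parity cells per stripe; needs >= 5 datanodes for full placement.
      dfs.enableErasureCodingPolicy("RS-3-2-1024k");
      Path dir = new Path("/user/jenkins/ec-dir");   // hypothetical directory
      dfs.mkdirs(dir);
      dfs.setErasureCodingPolicy(dir, "RS-3-2-1024k");
      System.out.println("EC policy on " + dir + ": " + dfs.getErasureCodingPolicy(dir));
    }
  }
}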
2024-12-05T07:44:46,051 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T07:44:46,051 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 2a2aca0d464f6f2e1621effde811c7fd, disabling compactions & flushes 2024-12-05T07:44:46,052 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd. 2024-12-05T07:44:46,052 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd. 2024-12-05T07:44:46,052 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd. after waiting 0 ms 2024-12-05T07:44:46,052 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd. 2024-12-05T07:44:46,052 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd. 2024-12-05T07:44:46,052 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 2a2aca0d464f6f2e1621effde811c7fd: Waiting for close lock at 1733384686051Disabling compacts and flushes for region at 1733384686051Disabling writes for close at 1733384686052 (+1 ms)Writing region close event to WAL at 1733384686052Closed at 1733384686052 2024-12-05T07:44:46,059 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T07:44:46,065 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733384686059"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733384686059"}]},"ts":"1733384686059"} 2024-12-05T07:44:46,071 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-05T07:44:46,074 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T07:44:46,076 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733384686074"}]},"ts":"1733384686074"} 2024-12-05T07:44:46,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T07:44:46,084 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-05T07:44:46,085 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {fc6cd38557f3=0} racks are {/default-rack=0} 2024-12-05T07:44:46,086 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T07:44:46,086 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T07:44:46,086 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T07:44:46,087 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T07:44:46,087 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T07:44:46,087 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T07:44:46,087 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T07:44:46,087 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T07:44:46,087 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T07:44:46,087 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T07:44:46,089 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=2a2aca0d464f6f2e1621effde811c7fd, ASSIGN}] 2024-12-05T07:44:46,092 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=2a2aca0d464f6f2e1621effde811c7fd, ASSIGN 2024-12-05T07:44:46,094 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=2a2aca0d464f6f2e1621effde811c7fd, ASSIGN; state=OFFLINE, location=fc6cd38557f3,38861,1733384683069; forceNewPlan=false, retain=false 2024-12-05T07:44:46,247 INFO [fc6cd38557f3:33229 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-05T07:44:46,248 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2a2aca0d464f6f2e1621effde811c7fd, regionState=OPENING, regionLocation=fc6cd38557f3,38861,1733384683069 2024-12-05T07:44:46,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=2a2aca0d464f6f2e1621effde811c7fd, ASSIGN because future has completed 2024-12-05T07:44:46,253 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2a2aca0d464f6f2e1621effde811c7fd, server=fc6cd38557f3,38861,1733384683069}] 2024-12-05T07:44:46,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T07:44:46,408 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T07:44:46,411 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54987, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T07:44:46,416 INFO [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd. 2024-12-05T07:44:46,417 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 2a2aca0d464f6f2e1621effde811c7fd, NAME => 'TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd.', STARTKEY => '', ENDKEY => ''} 2024-12-05T07:44:46,417 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 2a2aca0d464f6f2e1621effde811c7fd 2024-12-05T07:44:46,417 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T07:44:46,417 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 2a2aca0d464f6f2e1621effde811c7fd 2024-12-05T07:44:46,418 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 2a2aca0d464f6f2e1621effde811c7fd 2024-12-05T07:44:46,420 INFO [StoreOpener-2a2aca0d464f6f2e1621effde811c7fd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2a2aca0d464f6f2e1621effde811c7fd 2024-12-05T07:44:46,423 INFO [StoreOpener-2a2aca0d464f6f2e1621effde811c7fd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2a2aca0d464f6f2e1621effde811c7fd columnFamilyName cf 2024-12-05T07:44:46,423 DEBUG [StoreOpener-2a2aca0d464f6f2e1621effde811c7fd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:46,424 INFO [StoreOpener-2a2aca0d464f6f2e1621effde811c7fd-1 {}] regionserver.HStore(327): Store=2a2aca0d464f6f2e1621effde811c7fd/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T07:44:46,425 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 2a2aca0d464f6f2e1621effde811c7fd 2024-12-05T07:44:46,426 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/default/TestHBaseWalOnEC/2a2aca0d464f6f2e1621effde811c7fd 2024-12-05T07:44:46,427 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/default/TestHBaseWalOnEC/2a2aca0d464f6f2e1621effde811c7fd 2024-12-05T07:44:46,427 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 2a2aca0d464f6f2e1621effde811c7fd 2024-12-05T07:44:46,428 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 2a2aca0d464f6f2e1621effde811c7fd 2024-12-05T07:44:46,432 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 2a2aca0d464f6f2e1621effde811c7fd 2024-12-05T07:44:46,439 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/default/TestHBaseWalOnEC/2a2aca0d464f6f2e1621effde811c7fd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T07:44:46,440 INFO [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 2a2aca0d464f6f2e1621effde811c7fd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65869452, jitterRate=-0.01846867799758911}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T07:44:46,440 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2a2aca0d464f6f2e1621effde811c7fd 2024-12-05T07:44:46,441 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 2a2aca0d464f6f2e1621effde811c7fd: Running coprocessor pre-open hook at 1733384686418Writing region info on filesystem at 1733384686418Initializing all the Stores at 1733384686420 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733384686420Cleaning up temporary data from old regions at 1733384686428 (+8 ms)Running coprocessor post-open hooks at 1733384686440 (+12 ms)Region opened successfully at 1733384686441 (+1 ms) 2024-12-05T07:44:46,443 INFO [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd., pid=6, masterSystemTime=1733384686407 2024-12-05T07:44:46,447 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd. 2024-12-05T07:44:46,447 INFO [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd. 2024-12-05T07:44:46,449 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2a2aca0d464f6f2e1621effde811c7fd, regionState=OPEN, openSeqNum=2, regionLocation=fc6cd38557f3,38861,1733384683069 2024-12-05T07:44:46,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2a2aca0d464f6f2e1621effde811c7fd, server=fc6cd38557f3,38861,1733384683069 because future has completed 2024-12-05T07:44:46,461 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-05T07:44:46,463 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 2a2aca0d464f6f2e1621effde811c7fd, server=fc6cd38557f3,38861,1733384683069 in 203 msec 2024-12-05T07:44:46,466 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-05T07:44:46,466 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=2a2aca0d464f6f2e1621effde811c7fd, ASSIGN in 372 msec 2024-12-05T07:44:46,468 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T07:44:46,469 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733384686468"}]},"ts":"1733384686468"} 2024-12-05T07:44:46,472 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-05T07:44:46,475 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T07:44:46,479 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 523 msec 2024-12-05T07:44:46,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T07:44:46,599 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-05T07:44:46,599 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-05T07:44:46,600 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T07:44:46,606 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-05T07:44:46,606 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T07:44:46,607 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-05T07:44:46,617 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd., hostname=fc6cd38557f3,38861,1733384683069, seqNum=2] 2024-12-05T07:44:46,618 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T07:44:46,624 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46528, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T07:44:46,635 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-05T07:44:46,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-05T07:44:46,645 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-05T07:44:46,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T07:44:46,648 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-05T07:44:46,649 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-05T07:44:46,758 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T07:44:46,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38861 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-05T07:44:46,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd. 2024-12-05T07:44:46,817 INFO [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 2a2aca0d464f6f2e1621effde811c7fd 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-05T07:44:46,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/default/TestHBaseWalOnEC/2a2aca0d464f6f2e1621effde811c7fd/.tmp/cf/961993b338fd4ca0a9bfd21b588751d2 is 36, key is row/cf:cq/1733384686625/Put/seqid=0 2024-12-05T07:44:46,883 WARN [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:46,884 WARN [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:46,897 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2101294106_22 at /127.0.0.1:58834 [Receiving block BP-1277874143-172.17.0.2-1733384677684:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:45937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58834 dst: /127.0.0.1:45937 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T07:44:46,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45937 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-05T07:44:46,906 WARN [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T07:44:46,907 INFO [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/default/TestHBaseWalOnEC/2a2aca0d464f6f2e1621effde811c7fd/.tmp/cf/961993b338fd4ca0a9bfd21b588751d2 2024-12-05T07:44:46,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/default/TestHBaseWalOnEC/2a2aca0d464f6f2e1621effde811c7fd/.tmp/cf/961993b338fd4ca0a9bfd21b588751d2 as hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/default/TestHBaseWalOnEC/2a2aca0d464f6f2e1621effde811c7fd/cf/961993b338fd4ca0a9bfd21b588751d2 2024-12-05T07:44:46,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T07:44:46,978 INFO [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/default/TestHBaseWalOnEC/2a2aca0d464f6f2e1621effde811c7fd/cf/961993b338fd4ca0a9bfd21b588751d2, entries=1, sequenceid=5, filesize=4.7 K 2024-12-05T07:44:46,989 INFO [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 2a2aca0d464f6f2e1621effde811c7fd in 169ms, sequenceid=5, compaction requested=false 2024-12-05T07:44:46,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-05T07:44:46,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 2a2aca0d464f6f2e1621effde811c7fd: 2024-12-05T07:44:46,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd. 
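The flush above persists a single 32-byte cell (the key row/cf:cq printed by HFileWriterImpl) from the memstore into a temporary HFile and then commits it under the cf/ directory at sequenceid=5. From the client side this corresponds to one Put followed by an admin-triggered flush, roughly as sketched below; a hedged equivalent assuming the HBase 2.x+ client API, with the cell value chosen arbitrarily because the log does not show it.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlush {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(name);
         Admin admin = conn.getAdmin()) {
      // One small cell: row/cf:cq, matching the key logged by HFileWriterImpl.
      // The value "value" is illustrative; the actual payload is not visible in the log.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      // Triggers the FlushTableProcedure / FlushRegionProcedure chain logged above.
      admin.flush(name);
    }
  }
}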
2024-12-05T07:44:46,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-05T07:44:46,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-05T07:44:47,008 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-05T07:44:47,008 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 352 msec 2024-12-05T07:44:47,016 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 375 msec 2024-12-05T07:44:47,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35705 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-05T07:44:47,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45937 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-05T07:44:47,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35705 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-05T07:44:47,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45937 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-05T07:44:47,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45937 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-05T07:44:47,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35705 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-05T07:44:47,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45937 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-05T07:44:47,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35705 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-05T07:44:47,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T07:44:47,278 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-05T07:44:47,292 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-05T07:44:47,293 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T07:44:47,293 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T07:44:47,298 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
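The call stack above spells out the shutdown path: TestHBaseWalOnEC.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which first closes the shared async connection and then stops the HBase and backing mini clusters. In a JUnit 4 test (the org.junit.runners frames above confirm the runner) that wiring usually looks roughly like the sketch below; the class and field names are illustrative, and the startMiniCluster(int) overload is an assumption rather than something taken from the test's source.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class MiniClusterLifecycle {
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    // Three region servers, matching RS:0..RS:2 in this log; the int overload is assumed here.
    UTIL.startMiniCluster(3);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    // Stops HBase (master + region servers) and the backing mini DFS/ZooKeeper cluster,
    // which is the step producing the shutdown messages that follow in this log.
    UTIL.shutdownMiniCluster();
  }
}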
2024-12-05T07:44:47,298 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T07:44:47,298 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T07:44:47,299 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-05T07:44:47,299 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=233511752, stopped=false 2024-12-05T07:44:47,299 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=fc6cd38557f3,33229,1733384682235 2024-12-05T07:44:47,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36931-0x101a5bcca860002, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T07:44:47,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39335-0x101a5bcca860003, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T07:44:47,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38861-0x101a5bcca860001, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T07:44:47,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T07:44:47,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39335-0x101a5bcca860003, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:47,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36931-0x101a5bcca860002, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:47,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38861-0x101a5bcca860001, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:47,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:47,348 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T07:44:47,349 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T07:44:47,349 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T07:44:47,349 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T07:44:47,350 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36931-0x101a5bcca860002, quorum=127.0.0.1:58368, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T07:44:47,350 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T07:44:47,350 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fc6cd38557f3,38861,1733384683069' ***** 2024-12-05T07:44:47,350 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39335-0x101a5bcca860003, quorum=127.0.0.1:58368, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T07:44:47,350 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T07:44:47,350 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fc6cd38557f3,36931,1733384683185' ***** 2024-12-05T07:44:47,350 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T07:44:47,350 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fc6cd38557f3,39335,1733384683268' ***** 2024-12-05T07:44:47,350 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38861-0x101a5bcca860001, quorum=127.0.0.1:58368, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T07:44:47,350 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T07:44:47,350 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T07:44:47,350 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T07:44:47,350 INFO [RS:0;fc6cd38557f3:38861 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T07:44:47,350 INFO [RS:1;fc6cd38557f3:36931 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T07:44:47,350 INFO [RS:1;fc6cd38557f3:36931 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T07:44:47,350 INFO [RS:0;fc6cd38557f3:38861 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T07:44:47,351 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T07:44:47,351 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.HRegionServer(959): stopping server fc6cd38557f3,36931,1733384683185 2024-12-05T07:44:47,351 INFO [RS:1;fc6cd38557f3:36931 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T07:44:47,351 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T07:44:47,351 INFO [RS:1;fc6cd38557f3:36931 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;fc6cd38557f3:36931. 
2024-12-05T07:44:47,351 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.HRegionServer(3091): Received CLOSE for 2a2aca0d464f6f2e1621effde811c7fd 2024-12-05T07:44:47,351 DEBUG [RS:1;fc6cd38557f3:36931 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T07:44:47,351 DEBUG [RS:1;fc6cd38557f3:36931 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T07:44:47,351 INFO [RS:2;fc6cd38557f3:39335 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T07:44:47,351 INFO [RS:2;fc6cd38557f3:39335 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T07:44:47,352 INFO [RS:2;fc6cd38557f3:39335 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T07:44:47,352 INFO [RS:2;fc6cd38557f3:39335 {}] regionserver.HRegionServer(959): stopping server fc6cd38557f3,39335,1733384683268 2024-12-05T07:44:47,352 INFO [RS:2;fc6cd38557f3:39335 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T07:44:47,352 INFO [RS:2;fc6cd38557f3:39335 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;fc6cd38557f3:39335. 2024-12-05T07:44:47,351 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-05T07:44:47,352 DEBUG [RS:2;fc6cd38557f3:39335 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T07:44:47,352 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T07:44:47,352 DEBUG [RS_CLOSE_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 2a2aca0d464f6f2e1621effde811c7fd, disabling compactions & flushes 2024-12-05T07:44:47,352 DEBUG [RS:2;fc6cd38557f3:39335 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T07:44:47,352 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T07:44:47,352 INFO [RS_CLOSE_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd. 2024-12-05T07:44:47,352 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-05T07:44:47,352 DEBUG [RS_CLOSE_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd. 2024-12-05T07:44:47,352 DEBUG [RS_CLOSE_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd. after waiting 0 ms 2024-12-05T07:44:47,352 INFO [RS:2;fc6cd38557f3:39335 {}] regionserver.HRegionServer(976): stopping server fc6cd38557f3,39335,1733384683268; all regions closed. 2024-12-05T07:44:47,352 DEBUG [RS_CLOSE_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd. 
2024-12-05T07:44:47,351 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.HRegionServer(959): stopping server fc6cd38557f3,38861,1733384683069 2024-12-05T07:44:47,351 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T07:44:47,353 INFO [RS:0;fc6cd38557f3:38861 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T07:44:47,353 INFO [RS:0;fc6cd38557f3:38861 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;fc6cd38557f3:38861. 2024-12-05T07:44:47,353 DEBUG [RS:0;fc6cd38557f3:38861 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T07:44:47,353 DEBUG [RS:0;fc6cd38557f3:38861 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T07:44:47,353 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-05T07:44:47,353 DEBUG [RS:0;fc6cd38557f3:38861 {}] regionserver.HRegionServer(1325): Online Regions={2a2aca0d464f6f2e1621effde811c7fd=TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd.} 2024-12-05T07:44:47,353 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-05T07:44:47,353 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T07:44:47,353 DEBUG [RS:1;fc6cd38557f3:36931 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-05T07:44:47,353 INFO [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T07:44:47,353 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T07:44:47,353 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T07:44:47,354 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for 
region hbase:meta,,1.1588230740 2024-12-05T07:44:47,354 DEBUG [RS:0;fc6cd38557f3:38861 {}] regionserver.HRegionServer(1351): Waiting on 2a2aca0d464f6f2e1621effde811c7fd 2024-12-05T07:44:47,354 DEBUG [RS:1;fc6cd38557f3:36931 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-05T07:44:47,354 INFO [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-05T07:44:47,360 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/WALs/fc6cd38557f3,39335,1733384683268/fc6cd38557f3%2C39335%2C1733384683268.1733384684971 not finished, retry = 0 2024-12-05T07:44:47,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40617 is added to blk_1073741826_1016 (size=93) 2024-12-05T07:44:47,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45937 is added to blk_1073741826_1016 (size=93) 2024-12-05T07:44:47,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35705 is added to blk_1073741826_1016 (size=93) 2024-12-05T07:44:47,374 DEBUG [RS_CLOSE_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/default/TestHBaseWalOnEC/2a2aca0d464f6f2e1621effde811c7fd/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-05T07:44:47,376 INFO [RS_CLOSE_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd. 2024-12-05T07:44:47,377 DEBUG [RS_CLOSE_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 2a2aca0d464f6f2e1621effde811c7fd: Waiting for close lock at 1733384687352Running coprocessor pre-close hooks at 1733384687352Disabling compacts and flushes for region at 1733384687352Disabling writes for close at 1733384687352Writing region close event to WAL at 1733384687355 (+3 ms)Running coprocessor post-close hooks at 1733384687375 (+20 ms)Closed at 1733384687376 (+1 ms) 2024-12-05T07:44:47,377 DEBUG [RS_CLOSE_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd. 2024-12-05T07:44:47,394 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740/.tmp/info/e4dae3168bac4cf9b397e3b6e6ce8695 is 153, key is TestHBaseWalOnEC,,1733384685945.2a2aca0d464f6f2e1621effde811c7fd./info:regioninfo/1733384686449/Put/seqid=0 2024-12-05T07:44:47,397 WARN [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-05T07:44:47,398 WARN [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:47,403 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1089434548_22 at /127.0.0.1:58884 [Receiving block BP-1277874143-172.17.0.2-1733384677684:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:45937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58884 dst: /127.0.0.1:45937 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T07:44:47,404 INFO [regionserver/fc6cd38557f3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T07:44:47,404 INFO [regionserver/fc6cd38557f3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T07:44:47,408 INFO [regionserver/fc6cd38557f3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T07:44:47,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45937 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-05T07:44:47,412 WARN [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-05T07:44:47,412 INFO [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740/.tmp/info/e4dae3168bac4cf9b397e3b6e6ce8695 2024-12-05T07:44:47,451 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740/.tmp/ns/482e80c5413e407c9f48d720016748f8 is 43, key is default/ns:d/1733384685703/Put/seqid=0 2024-12-05T07:44:47,453 WARN [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:47,453 WARN [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:47,459 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1089434548_22 at /127.0.0.1:58900 [Receiving block BP-1277874143-172.17.0.2-1733384677684:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:45937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58900 dst: /127.0.0.1:45937 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-05T07:44:47,468 DEBUG [RS:2;fc6cd38557f3:39335 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/oldWALs 2024-12-05T07:44:47,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45937 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-05T07:44:47,468 INFO [RS:2;fc6cd38557f3:39335 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL fc6cd38557f3%2C39335%2C1733384683268:(num 1733384684971) 2024-12-05T07:44:47,468 DEBUG [RS:2;fc6cd38557f3:39335 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T07:44:47,468 INFO [RS:2;fc6cd38557f3:39335 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T07:44:47,468 INFO [RS:2;fc6cd38557f3:39335 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T07:44:47,468 INFO [RS:2;fc6cd38557f3:39335 {}] hbase.ChoreService(370): Chore service for: regionserver/fc6cd38557f3:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T07:44:47,469 WARN [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T07:44:47,469 INFO [RS:2;fc6cd38557f3:39335 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T07:44:47,469 INFO [RS:2;fc6cd38557f3:39335 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T07:44:47,469 INFO [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740/.tmp/ns/482e80c5413e407c9f48d720016748f8 2024-12-05T07:44:47,469 INFO [RS:2;fc6cd38557f3:39335 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T07:44:47,469 INFO [regionserver/fc6cd38557f3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T07:44:47,469 INFO [RS:2;fc6cd38557f3:39335 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T07:44:47,469 INFO [RS:2;fc6cd38557f3:39335 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39335 2024-12-05T07:44:47,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T07:44:47,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39335-0x101a5bcca860003, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fc6cd38557f3,39335,1733384683268 2024-12-05T07:44:47,500 INFO [RS:2;fc6cd38557f3:39335 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T07:44:47,501 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fc6cd38557f3,39335,1733384683268] 2024-12-05T07:44:47,506 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740/.tmp/table/729e44749e434f56a2e128a34df5c45e is 52, key is TestHBaseWalOnEC/table:state/1733384686468/Put/seqid=0 2024-12-05T07:44:47,508 WARN [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:47,508 WARN [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:47,511 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1089434548_22 at /127.0.0.1:58910 [Receiving block BP-1277874143-172.17.0.2-1733384677684:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:45937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58910 dst: /127.0.0.1:45937 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T07:44:47,515 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fc6cd38557f3,39335,1733384683268 already deleted, retry=false 2024-12-05T07:44:47,515 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fc6cd38557f3,39335,1733384683268 expired; onlineServers=2 2024-12-05T07:44:47,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45937 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-05T07:44:47,519 WARN [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T07:44:47,519 INFO [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740/.tmp/table/729e44749e434f56a2e128a34df5c45e 2024-12-05T07:44:47,533 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740/.tmp/info/e4dae3168bac4cf9b397e3b6e6ce8695 as hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740/info/e4dae3168bac4cf9b397e3b6e6ce8695 2024-12-05T07:44:47,546 INFO [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740/info/e4dae3168bac4cf9b397e3b6e6ce8695, entries=10, sequenceid=11, filesize=6.5 K 2024-12-05T07:44:47,549 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740/.tmp/ns/482e80c5413e407c9f48d720016748f8 as hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740/ns/482e80c5413e407c9f48d720016748f8 2024-12-05T07:44:47,554 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.HRegionServer(976): stopping server fc6cd38557f3,38861,1733384683069; all regions closed. 
2024-12-05T07:44:47,554 DEBUG [RS:1;fc6cd38557f3:36931 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-05T07:44:47,560 INFO [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740/ns/482e80c5413e407c9f48d720016748f8, entries=2, sequenceid=11, filesize=5.0 K 2024-12-05T07:44:47,563 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740/.tmp/table/729e44749e434f56a2e128a34df5c45e as hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740/table/729e44749e434f56a2e128a34df5c45e 2024-12-05T07:44:47,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45937 is added to blk_1073741828_1018 (size=1298) 2024-12-05T07:44:47,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40617 is added to blk_1073741828_1018 (size=1298) 2024-12-05T07:44:47,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35705 is added to blk_1073741828_1018 (size=1298) 2024-12-05T07:44:47,574 DEBUG [RS:0;fc6cd38557f3:38861 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/oldWALs 2024-12-05T07:44:47,574 INFO [RS:0;fc6cd38557f3:38861 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL fc6cd38557f3%2C38861%2C1733384683069:(num 1733384684973) 2024-12-05T07:44:47,574 DEBUG [RS:0;fc6cd38557f3:38861 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T07:44:47,574 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T07:44:47,574 INFO [RS:0;fc6cd38557f3:38861 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T07:44:47,574 INFO [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740/table/729e44749e434f56a2e128a34df5c45e, entries=2, sequenceid=11, filesize=5.1 K 2024-12-05T07:44:47,575 INFO [RS:0;fc6cd38557f3:38861 {}] hbase.ChoreService(370): Chore service for: regionserver/fc6cd38557f3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T07:44:47,575 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T07:44:47,575 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T07:44:47,575 INFO [regionserver/fc6cd38557f3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T07:44:47,575 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-05T07:44:47,575 INFO [RS:0;fc6cd38557f3:38861 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T07:44:47,575 INFO [RS:0;fc6cd38557f3:38861 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38861 2024-12-05T07:44:47,576 INFO [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 222ms, sequenceid=11, compaction requested=false 2024-12-05T07:44:47,577 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-05T07:44:47,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T07:44:47,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38861-0x101a5bcca860001, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fc6cd38557f3,38861,1733384683069 2024-12-05T07:44:47,590 INFO [RS:0;fc6cd38557f3:38861 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T07:44:47,604 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-05T07:44:47,605 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T07:44:47,605 INFO [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T07:44:47,605 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733384687353Running coprocessor pre-close hooks at 1733384687353Disabling compacts and flushes for region at 1733384687353Disabling writes for close at 1733384687354 (+1 ms)Obtaining lock to block concurrent updates at 1733384687354Preparing flush snapshotting stores in 1588230740 at 1733384687354Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733384687355 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733384687356 (+1 ms)Flushing 1588230740/info: creating writer at 1733384687357 (+1 ms)Flushing 1588230740/info: appending metadata at 1733384687391 (+34 ms)Flushing 1588230740/info: closing flushed file at 1733384687391Flushing 1588230740/ns: creating writer at 1733384687426 (+35 ms)Flushing 1588230740/ns: appending metadata at 1733384687449 (+23 ms)Flushing 1588230740/ns: closing flushed file at 1733384687449Flushing 1588230740/table: creating writer at 1733384687479 (+30 ms)Flushing 1588230740/table: appending metadata at 1733384687505 (+26 ms)Flushing 1588230740/table: closing flushed file at 1733384687505Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@b7496e1: reopening flushed file at 1733384687531 (+26 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67314e6d: reopening flushed file at 1733384687547 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@b8d2913: reopening flushed file at 1733384687561 (+14 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 222ms, sequenceid=11, compaction requested=false at 1733384687576 (+15 ms)Writing region close event to WAL at 1733384687589 (+13 ms)Running coprocessor post-close hooks at 1733384687605 (+16 ms)Closed at 1733384687605 2024-12-05T07:44:47,605 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-05T07:44:47,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39335-0x101a5bcca860003, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T07:44:47,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39335-0x101a5bcca860003, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T07:44:47,607 INFO [RS:2;fc6cd38557f3:39335 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T07:44:47,607 INFO [RS:2;fc6cd38557f3:39335 {}] regionserver.HRegionServer(1031): Exiting; stopping=fc6cd38557f3,39335,1733384683268; zookeeper connection closed. 2024-12-05T07:44:47,608 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6e3d1a7b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6e3d1a7b 2024-12-05T07:44:47,656 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fc6cd38557f3,38861,1733384683069] 2024-12-05T07:44:47,664 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fc6cd38557f3,38861,1733384683069 already deleted, retry=false 2024-12-05T07:44:47,665 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fc6cd38557f3,38861,1733384683069 expired; onlineServers=1 2024-12-05T07:44:47,754 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.HRegionServer(976): stopping server fc6cd38557f3,36931,1733384683185; all regions closed. 2024-12-05T07:44:47,757 INFO [RS:0;fc6cd38557f3:38861 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T07:44:47,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38861-0x101a5bcca860001, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T07:44:47,757 INFO [RS:0;fc6cd38557f3:38861 {}] regionserver.HRegionServer(1031): Exiting; stopping=fc6cd38557f3,38861,1733384683069; zookeeper connection closed. 
2024-12-05T07:44:47,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38861-0x101a5bcca860001, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T07:44:47,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35705 is added to blk_1073741829_1019 (size=2751) 2024-12-05T07:44:47,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40617 is added to blk_1073741829_1019 (size=2751) 2024-12-05T07:44:47,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45937 is added to blk_1073741829_1019 (size=2751) 2024-12-05T07:44:47,759 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@12ec2a0 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@12ec2a0 2024-12-05T07:44:47,763 DEBUG [RS:1;fc6cd38557f3:36931 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/oldWALs 2024-12-05T07:44:47,763 INFO [RS:1;fc6cd38557f3:36931 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL fc6cd38557f3%2C36931%2C1733384683185.meta:.meta(num 1733384685470) 2024-12-05T07:44:47,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40617 is added to blk_1073741827_1017 (size=93) 2024-12-05T07:44:47,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35705 is added to blk_1073741827_1017 (size=93) 2024-12-05T07:44:47,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45937 is added to blk_1073741827_1017 (size=93) 2024-12-05T07:44:47,769 DEBUG [RS:1;fc6cd38557f3:36931 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/oldWALs 2024-12-05T07:44:47,769 INFO [RS:1;fc6cd38557f3:36931 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL fc6cd38557f3%2C36931%2C1733384683185:(num 1733384684971) 2024-12-05T07:44:47,769 DEBUG [RS:1;fc6cd38557f3:36931 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T07:44:47,769 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T07:44:47,770 INFO [RS:1;fc6cd38557f3:36931 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T07:44:47,770 INFO [RS:1;fc6cd38557f3:36931 {}] hbase.ChoreService(370): Chore service for: regionserver/fc6cd38557f3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T07:44:47,770 INFO [RS:1;fc6cd38557f3:36931 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T07:44:47,770 INFO [regionserver/fc6cd38557f3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T07:44:47,770 INFO [RS:1;fc6cd38557f3:36931 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36931 2024-12-05T07:44:47,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T07:44:47,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36931-0x101a5bcca860002, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fc6cd38557f3,36931,1733384683185 2024-12-05T07:44:47,798 INFO [RS:1;fc6cd38557f3:36931 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T07:44:47,799 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fc6cd38557f3,36931,1733384683185] 2024-12-05T07:44:47,815 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fc6cd38557f3,36931,1733384683185 already deleted, retry=false 2024-12-05T07:44:47,815 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fc6cd38557f3,36931,1733384683185 expired; onlineServers=0 2024-12-05T07:44:47,815 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'fc6cd38557f3,33229,1733384682235' ***** 2024-12-05T07:44:47,815 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-05T07:44:47,815 INFO [M:0;fc6cd38557f3:33229 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T07:44:47,815 INFO [M:0;fc6cd38557f3:33229 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T07:44:47,816 DEBUG [M:0;fc6cd38557f3:33229 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-05T07:44:47,816 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-05T07:44:47,816 DEBUG [M:0;fc6cd38557f3:33229 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-05T07:44:47,816 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster-HFileCleaner.large.0-1733384684589 {}] cleaner.HFileCleaner(306): Exit Thread[master/fc6cd38557f3:0:becomeActiveMaster-HFileCleaner.large.0-1733384684589,5,FailOnTimeoutGroup] 2024-12-05T07:44:47,816 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster-HFileCleaner.small.0-1733384684590 {}] cleaner.HFileCleaner(306): Exit Thread[master/fc6cd38557f3:0:becomeActiveMaster-HFileCleaner.small.0-1733384684590,5,FailOnTimeoutGroup] 2024-12-05T07:44:47,816 INFO [M:0;fc6cd38557f3:33229 {}] hbase.ChoreService(370): Chore service for: master/fc6cd38557f3:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-05T07:44:47,816 INFO [M:0;fc6cd38557f3:33229 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T07:44:47,816 DEBUG [M:0;fc6cd38557f3:33229 {}] master.HMaster(1795): Stopping service threads 2024-12-05T07:44:47,816 INFO [M:0;fc6cd38557f3:33229 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-05T07:44:47,816 INFO [M:0;fc6cd38557f3:33229 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T07:44:47,817 INFO [M:0;fc6cd38557f3:33229 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-05T07:44:47,817 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-05T07:44:47,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T07:44:47,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:47,823 DEBUG [M:0;fc6cd38557f3:33229 {}] zookeeper.ZKUtil(347): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-05T07:44:47,823 WARN [M:0;fc6cd38557f3:33229 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-05T07:44:47,824 INFO [M:0;fc6cd38557f3:33229 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/.lastflushedseqids 2024-12-05T07:44:47,834 WARN [M:0;fc6cd38557f3:33229 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:47,834 WARN [M:0;fc6cd38557f3:33229 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-05T07:44:47,837 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_505696893_22 at /127.0.0.1:58932 [Receiving block BP-1277874143-172.17.0.2-1733384677684:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:45937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58932 dst: /127.0.0.1:45937 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T07:44:47,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45937 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-05T07:44:47,841 WARN [M:0;fc6cd38557f3:33229 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T07:44:47,841 INFO [M:0;fc6cd38557f3:33229 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-05T07:44:47,841 INFO [M:0;fc6cd38557f3:33229 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-05T07:44:47,842 DEBUG [M:0;fc6cd38557f3:33229 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T07:44:47,842 INFO [M:0;fc6cd38557f3:33229 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T07:44:47,842 DEBUG [M:0;fc6cd38557f3:33229 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T07:44:47,842 DEBUG [M:0;fc6cd38557f3:33229 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T07:44:47,842 DEBUG [M:0;fc6cd38557f3:33229 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-05T07:44:47,842 INFO [M:0;fc6cd38557f3:33229 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.85 KB heapSize=34.13 KB 2024-12-05T07:44:47,862 DEBUG [M:0;fc6cd38557f3:33229 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ab60942444f04dd1b1e5258e2e7c10ad is 82, key is hbase:meta,,1/info:regioninfo/1733384685566/Put/seqid=0 2024-12-05T07:44:47,864 WARN [M:0;fc6cd38557f3:33229 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:47,864 WARN [M:0;fc6cd38557f3:33229 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:47,867 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_505696893_22 at /127.0.0.1:44516 [Receiving block BP-1277874143-172.17.0.2-1733384677684:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:35705:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44516 dst: /127.0.0.1:35705 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T07:44:47,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35705 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-05T07:44:47,871 WARN [M:0;fc6cd38557f3:33229 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-05T07:44:47,871 INFO [M:0;fc6cd38557f3:33229 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ab60942444f04dd1b1e5258e2e7c10ad 2024-12-05T07:44:47,895 DEBUG [M:0;fc6cd38557f3:33229 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2ccf0c5b15db484bbba2f91c51fbcc03 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733384686478/Put/seqid=0 2024-12-05T07:44:47,898 WARN [M:0;fc6cd38557f3:33229 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:47,898 WARN [M:0;fc6cd38557f3:33229 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:47,904 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_505696893_22 at /127.0.0.1:55126 [Receiving block BP-1277874143-172.17.0.2-1733384677684:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:40617:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55126 dst: /127.0.0.1:40617 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-05T07:44:47,906 INFO [RS:1;fc6cd38557f3:36931 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T07:44:47,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36931-0x101a5bcca860002, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T07:44:47,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36931-0x101a5bcca860002, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T07:44:47,906 INFO [RS:1;fc6cd38557f3:36931 {}] regionserver.HRegionServer(1031): Exiting; stopping=fc6cd38557f3,36931,1733384683185; zookeeper connection closed. 2024-12-05T07:44:47,910 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1a728254 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1a728254 2024-12-05T07:44:47,911 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-05T07:44:47,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40617 is added to blk_-9223372036854775552_1037 (size=6441) 2024-12-05T07:44:47,912 WARN [M:0;fc6cd38557f3:33229 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T07:44:47,913 INFO [M:0;fc6cd38557f3:33229 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.17 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2ccf0c5b15db484bbba2f91c51fbcc03 2024-12-05T07:44:47,939 DEBUG [M:0;fc6cd38557f3:33229 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a40eb0c8220f40208635d9940808b2a0 is 69, key is fc6cd38557f3,36931,1733384683185/rs:state/1733384684685/Put/seqid=0 2024-12-05T07:44:47,941 WARN [M:0;fc6cd38557f3:33229 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:47,941 WARN [M:0;fc6cd38557f3:33229 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-05T07:44:47,944 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_505696893_22 at /127.0.0.1:58962 [Receiving block BP-1277874143-172.17.0.2-1733384677684:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:45937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58962 dst: /127.0.0.1:45937 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T07:44:47,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45937 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-05T07:44:47,953 WARN [M:0;fc6cd38557f3:33229 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-05T07:44:47,954 INFO [M:0;fc6cd38557f3:33229 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a40eb0c8220f40208635d9940808b2a0 2024-12-05T07:44:47,963 DEBUG [M:0;fc6cd38557f3:33229 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ab60942444f04dd1b1e5258e2e7c10ad as hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ab60942444f04dd1b1e5258e2e7c10ad 2024-12-05T07:44:47,972 INFO [M:0;fc6cd38557f3:33229 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ab60942444f04dd1b1e5258e2e7c10ad, entries=8, sequenceid=72, filesize=5.5 K 2024-12-05T07:44:47,974 DEBUG [M:0;fc6cd38557f3:33229 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2ccf0c5b15db484bbba2f91c51fbcc03 as hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2ccf0c5b15db484bbba2f91c51fbcc03 2024-12-05T07:44:47,983 INFO [M:0;fc6cd38557f3:33229 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2ccf0c5b15db484bbba2f91c51fbcc03, entries=8, sequenceid=72, filesize=6.3 K 2024-12-05T07:44:47,986 DEBUG [M:0;fc6cd38557f3:33229 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a40eb0c8220f40208635d9940808b2a0 as hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a40eb0c8220f40208635d9940808b2a0 2024-12-05T07:44:47,995 INFO [M:0;fc6cd38557f3:33229 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a40eb0c8220f40208635d9940808b2a0, entries=3, sequenceid=72, filesize=5.2 K 2024-12-05T07:44:47,996 INFO [M:0;fc6cd38557f3:33229 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 154ms, sequenceid=72, compaction requested=false 2024-12-05T07:44:47,998 INFO [M:0;fc6cd38557f3:33229 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T07:44:47,998 DEBUG [M:0;fc6cd38557f3:33229 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733384687842Disabling compacts and flushes for region at 1733384687842Disabling writes for close at 1733384687842Obtaining lock to block concurrent updates at 1733384687842Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733384687842Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27492, getHeapSize=34888, getOffHeapSize=0, getCellsCount=85 at 1733384687842Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733384687843 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733384687843Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733384687862 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733384687862Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733384687880 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733384687895 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733384687895Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733384687921 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733384687938 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733384687938Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6920570f: reopening flushed file at 1733384687961 (+23 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@71b60c82: reopening flushed file at 1733384687973 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ba054ee: reopening flushed file at 1733384687983 (+10 ms)Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 154ms, sequenceid=72, compaction requested=false at 1733384687996 (+13 ms)Writing region close event to WAL at 1733384687997 (+1 ms)Closed at 1733384687997 2024-12-05T07:44:48,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40617 is added to blk_1073741825_1011 (size=32695) 2024-12-05T07:44:48,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35705 is added to blk_1073741825_1011 (size=32695) 2024-12-05T07:44:48,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45937 is added to blk_1073741825_1011 (size=32695) 2024-12-05T07:44:48,002 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T07:44:48,002 INFO [M:0;fc6cd38557f3:33229 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-05T07:44:48,003 INFO [M:0;fc6cd38557f3:33229 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33229 2024-12-05T07:44:48,003 INFO [M:0;fc6cd38557f3:33229 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T07:44:48,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T07:44:48,131 INFO [M:0;fc6cd38557f3:33229 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T07:44:48,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x101a5bcca860000, quorum=127.0.0.1:58368, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T07:44:48,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f750918{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T07:44:48,174 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@86bf2a7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T07:44:48,174 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T07:44:48,175 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1023f385{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T07:44:48,175 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e1f796{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/hadoop.log.dir/,STOPPED} 2024-12-05T07:44:48,177 WARN [BP-1277874143-172.17.0.2-1733384677684 heartbeating to localhost/127.0.0.1:40085 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T07:44:48,177 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T07:44:48,177 WARN [BP-1277874143-172.17.0.2-1733384677684 heartbeating to localhost/127.0.0.1:40085 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1277874143-172.17.0.2-1733384677684 (Datanode Uuid a9e8ab7e-d298-4ada-9dfd-e58886528ebb) service to localhost/127.0.0.1:40085 2024-12-05T07:44:48,177 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T07:44:48,178 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/cluster_d89fa536-fc25-b305-ed6a-5c67ccfe545d/data/data5/current/BP-1277874143-172.17.0.2-1733384677684 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T07:44:48,179 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/cluster_d89fa536-fc25-b305-ed6a-5c67ccfe545d/data/data6/current/BP-1277874143-172.17.0.2-1733384677684 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T07:44:48,179 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T07:44:48,181 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@26b068f7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T07:44:48,181 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5739b847{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T07:44:48,182 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T07:44:48,182 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c2c5be{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T07:44:48,182 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a91ec1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/hadoop.log.dir/,STOPPED} 2024-12-05T07:44:48,184 WARN [BP-1277874143-172.17.0.2-1733384677684 heartbeating to localhost/127.0.0.1:40085 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T07:44:48,184 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T07:44:48,184 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T07:44:48,184 WARN [BP-1277874143-172.17.0.2-1733384677684 heartbeating to localhost/127.0.0.1:40085 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1277874143-172.17.0.2-1733384677684 (Datanode Uuid c6897825-0977-41e2-8956-53205b2691df) service to localhost/127.0.0.1:40085 2024-12-05T07:44:48,185 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/cluster_d89fa536-fc25-b305-ed6a-5c67ccfe545d/data/data3/current/BP-1277874143-172.17.0.2-1733384677684 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T07:44:48,185 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/cluster_d89fa536-fc25-b305-ed6a-5c67ccfe545d/data/data4/current/BP-1277874143-172.17.0.2-1733384677684 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T07:44:48,185 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T07:44:48,187 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7e705dc8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T07:44:48,187 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ad1569e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T07:44:48,188 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T07:44:48,188 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17f1c7fc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T07:44:48,188 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32fec40a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/hadoop.log.dir/,STOPPED} 2024-12-05T07:44:48,189 WARN [BP-1277874143-172.17.0.2-1733384677684 heartbeating to localhost/127.0.0.1:40085 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T07:44:48,189 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T07:44:48,189 WARN [BP-1277874143-172.17.0.2-1733384677684 heartbeating to localhost/127.0.0.1:40085 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1277874143-172.17.0.2-1733384677684 (Datanode Uuid 6ece4d20-e2f1-4f27-83d4-007dae01a642) service to localhost/127.0.0.1:40085 2024-12-05T07:44:48,189 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T07:44:48,189 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/cluster_d89fa536-fc25-b305-ed6a-5c67ccfe545d/data/data1/current/BP-1277874143-172.17.0.2-1733384677684 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T07:44:48,189 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/cluster_d89fa536-fc25-b305-ed6a-5c67ccfe545d/data/data2/current/BP-1277874143-172.17.0.2-1733384677684 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T07:44:48,190 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T07:44:48,197 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T07:44:48,197 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T07:44:48,197 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T07:44:48,198 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T07:44:48,198 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/hadoop.log.dir/,STOPPED} 2024-12-05T07:44:48,207 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-05T07:44:48,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-05T07:44:48,246 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=88 (was 155), OpenFileDescriptor=443 (was 393) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=304 (was 275) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=7009 (was 7391) 2024-12-05T07:44:48,253 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=88, OpenFileDescriptor=443, MaxFileDescriptor=1048576, SystemLoadAverage=304, ProcessCount=11, AvailableMemoryMB=7009 2024-12-05T07:44:48,253 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-05T07:44:48,254 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/hadoop.log.dir so I do NOT create it in target/test-data/879b195a-6d70-226d-0978-9f63c906d950 2024-12-05T07:44:48,254 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4b48d31a-e2e4-94a3-9866-5b4f1232720a/hadoop.tmp.dir so I do NOT create it in target/test-data/879b195a-6d70-226d-0978-9f63c906d950 2024-12-05T07:44:48,254 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/cluster_31b7b7bd-a015-2a30-f4f5-858b91d603d0, deleteOnExit=true 2024-12-05T07:44:48,254 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-05T07:44:48,255 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/test.cache.data in system properties and HBase conf 2024-12-05T07:44:48,255 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/hadoop.tmp.dir in system properties and HBase conf 2024-12-05T07:44:48,255 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/hadoop.log.dir in system properties and HBase conf 2024-12-05T07:44:48,255 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-05T07:44:48,255 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-05T07:44:48,255 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-05T07:44:48,256 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-05T07:44:48,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-05T07:44:48,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-05T07:44:48,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-05T07:44:48,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T07:44:48,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-05T07:44:48,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-05T07:44:48,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T07:44:48,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T07:44:48,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-05T07:44:48,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/nfs.dump.dir in system properties and HBase conf 2024-12-05T07:44:48,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/java.io.tmpdir in system properties and HBase conf 2024-12-05T07:44:48,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T07:44:48,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-05T07:44:48,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-05T07:44:48,583 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T07:44:48,588 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T07:44:48,589 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T07:44:48,590 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T07:44:48,590 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T07:44:48,591 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T07:44:48,591 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@760f4a1c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/hadoop.log.dir/,AVAILABLE} 2024-12-05T07:44:48,592 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fbe2fc0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T07:44:48,697 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2385e487{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/java.io.tmpdir/jetty-localhost-33281-hadoop-hdfs-3_4_1-tests_jar-_-any-1579005240905769918/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T07:44:48,698 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@722f6ac4{HTTP/1.1, (http/1.1)}{localhost:33281} 2024-12-05T07:44:48,698 INFO [Time-limited test {}] server.Server(415): Started @13408ms 2024-12-05T07:44:48,908 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T07:44:48,912 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T07:44:48,918 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T07:44:48,918 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T07:44:48,918 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T07:44:48,919 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f7cb10{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/hadoop.log.dir/,AVAILABLE} 2024-12-05T07:44:48,919 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@427407e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T07:44:49,022 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@19f40ccf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/java.io.tmpdir/jetty-localhost-34549-hadoop-hdfs-3_4_1-tests_jar-_-any-13423044652384729470/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T07:44:49,023 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@78de12c2{HTTP/1.1, (http/1.1)}{localhost:34549} 2024-12-05T07:44:49,023 INFO [Time-limited test {}] server.Server(415): Started @13733ms 2024-12-05T07:44:49,024 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T07:44:49,059 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T07:44:49,067 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T07:44:49,071 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T07:44:49,071 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T07:44:49,071 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T07:44:49,071 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@29927a24{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/hadoop.log.dir/,AVAILABLE} 2024-12-05T07:44:49,072 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@63ea337e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T07:44:49,170 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2c130e95{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/java.io.tmpdir/jetty-localhost-42921-hadoop-hdfs-3_4_1-tests_jar-_-any-3911671504963704103/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T07:44:49,170 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21c2764e{HTTP/1.1, (http/1.1)}{localhost:42921} 2024-12-05T07:44:49,170 INFO [Time-limited test {}] server.Server(415): Started @13880ms 2024-12-05T07:44:49,172 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T07:44:49,206 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T07:44:49,210 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T07:44:49,211 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T07:44:49,211 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T07:44:49,211 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T07:44:49,212 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4dcac8d6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/hadoop.log.dir/,AVAILABLE} 2024-12-05T07:44:49,212 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19b3fbfc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T07:44:49,321 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f872bbe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/java.io.tmpdir/jetty-localhost-36231-hadoop-hdfs-3_4_1-tests_jar-_-any-4478740470384606959/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T07:44:49,322 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@734ce002{HTTP/1.1, (http/1.1)}{localhost:36231} 2024-12-05T07:44:49,322 INFO [Time-limited test {}] server.Server(415): Started @14032ms 2024-12-05T07:44:49,323 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T07:44:49,872 WARN [Thread-558 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/cluster_31b7b7bd-a015-2a30-f4f5-858b91d603d0/data/data1/current/BP-1891229739-172.17.0.2-1733384688285/current, will proceed with Du for space computation calculation, 2024-12-05T07:44:49,872 WARN [Thread-559 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/cluster_31b7b7bd-a015-2a30-f4f5-858b91d603d0/data/data2/current/BP-1891229739-172.17.0.2-1733384688285/current, will proceed with Du for space computation calculation, 2024-12-05T07:44:49,896 WARN [Thread-500 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T07:44:49,900 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2fa29871a7c08d4d with lease ID 0x1c983cb96864ed8f: Processing first storage report for DS-51eb3132-a8a7-4506-88be-5eb950bd8cf7 from datanode DatanodeRegistration(127.0.0.1:38583, datanodeUuid=2b70ffb7-7322-4ec7-8eb7-6126a2a10100, infoPort=35747, infoSecurePort=0, ipcPort=35981, storageInfo=lv=-57;cid=testClusterID;nsid=251128697;c=1733384688285) 2024-12-05T07:44:49,900 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2fa29871a7c08d4d with lease ID 0x1c983cb96864ed8f: from storage DS-51eb3132-a8a7-4506-88be-5eb950bd8cf7 node DatanodeRegistration(127.0.0.1:38583, datanodeUuid=2b70ffb7-7322-4ec7-8eb7-6126a2a10100, infoPort=35747, infoSecurePort=0, ipcPort=35981, storageInfo=lv=-57;cid=testClusterID;nsid=251128697;c=1733384688285), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T07:44:49,900 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2fa29871a7c08d4d with lease ID 0x1c983cb96864ed8f: Processing first storage report for DS-e9cbc527-624e-4433-bfd5-be170c21ce62 from datanode DatanodeRegistration(127.0.0.1:38583, datanodeUuid=2b70ffb7-7322-4ec7-8eb7-6126a2a10100, infoPort=35747, infoSecurePort=0, ipcPort=35981, storageInfo=lv=-57;cid=testClusterID;nsid=251128697;c=1733384688285) 2024-12-05T07:44:49,900 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2fa29871a7c08d4d with lease ID 0x1c983cb96864ed8f: from storage DS-e9cbc527-624e-4433-bfd5-be170c21ce62 node DatanodeRegistration(127.0.0.1:38583, datanodeUuid=2b70ffb7-7322-4ec7-8eb7-6126a2a10100, infoPort=35747, infoSecurePort=0, ipcPort=35981, storageInfo=lv=-57;cid=testClusterID;nsid=251128697;c=1733384688285), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T07:44:50,166 WARN [Thread-572 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/cluster_31b7b7bd-a015-2a30-f4f5-858b91d603d0/data/data4/current/BP-1891229739-172.17.0.2-1733384688285/current, will proceed with Du for space computation calculation, 2024-12-05T07:44:50,166 WARN [Thread-571 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/cluster_31b7b7bd-a015-2a30-f4f5-858b91d603d0/data/data3/current/BP-1891229739-172.17.0.2-1733384688285/current, will proceed with Du for space computation calculation, 2024-12-05T07:44:50,194 WARN [Thread-523 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T07:44:50,196 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x96abebef900eb66d with lease ID 0x1c983cb96864ed90: Processing first storage report for DS-cdbe38be-046a-4a10-869e-e8e18c65e01d from datanode DatanodeRegistration(127.0.0.1:45877, datanodeUuid=21449816-2259-4802-bd99-f4a5f5828df6, infoPort=41273, infoSecurePort=0, ipcPort=42959, storageInfo=lv=-57;cid=testClusterID;nsid=251128697;c=1733384688285) 2024-12-05T07:44:50,197 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96abebef900eb66d with lease ID 0x1c983cb96864ed90: from storage DS-cdbe38be-046a-4a10-869e-e8e18c65e01d node DatanodeRegistration(127.0.0.1:45877, datanodeUuid=21449816-2259-4802-bd99-f4a5f5828df6, infoPort=41273, infoSecurePort=0, ipcPort=42959, storageInfo=lv=-57;cid=testClusterID;nsid=251128697;c=1733384688285), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T07:44:50,197 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x96abebef900eb66d with lease ID 0x1c983cb96864ed90: Processing first storage report for DS-2feac68b-720b-495e-9978-624e8f6bf0f6 from datanode DatanodeRegistration(127.0.0.1:45877, datanodeUuid=21449816-2259-4802-bd99-f4a5f5828df6, infoPort=41273, infoSecurePort=0, ipcPort=42959, storageInfo=lv=-57;cid=testClusterID;nsid=251128697;c=1733384688285) 2024-12-05T07:44:50,197 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96abebef900eb66d with lease ID 0x1c983cb96864ed90: from storage DS-2feac68b-720b-495e-9978-624e8f6bf0f6 node DatanodeRegistration(127.0.0.1:45877, datanodeUuid=21449816-2259-4802-bd99-f4a5f5828df6, infoPort=41273, infoSecurePort=0, ipcPort=42959, storageInfo=lv=-57;cid=testClusterID;nsid=251128697;c=1733384688285), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T07:44:50,299 WARN [Thread-582 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/cluster_31b7b7bd-a015-2a30-f4f5-858b91d603d0/data/data5/current/BP-1891229739-172.17.0.2-1733384688285/current, will proceed with Du for space computation calculation, 2024-12-05T07:44:50,299 WARN [Thread-583 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/cluster_31b7b7bd-a015-2a30-f4f5-858b91d603d0/data/data6/current/BP-1891229739-172.17.0.2-1733384688285/current, will proceed with Du for space computation calculation, 2024-12-05T07:44:50,323 WARN [Thread-545 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-05T07:44:50,329 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8c39fdacd3032907 with lease ID 0x1c983cb96864ed91: Processing first storage report for DS-70c1ca02-6547-44d6-aba4-4e8b338bfeae from datanode DatanodeRegistration(127.0.0.1:40881, datanodeUuid=9c569f87-685a-4eae-afaf-5e046b9abb72, infoPort=34113, infoSecurePort=0, ipcPort=44473, storageInfo=lv=-57;cid=testClusterID;nsid=251128697;c=1733384688285) 2024-12-05T07:44:50,329 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c39fdacd3032907 with lease ID 0x1c983cb96864ed91: from storage DS-70c1ca02-6547-44d6-aba4-4e8b338bfeae node DatanodeRegistration(127.0.0.1:40881, datanodeUuid=9c569f87-685a-4eae-afaf-5e046b9abb72, infoPort=34113, infoSecurePort=0, ipcPort=44473, storageInfo=lv=-57;cid=testClusterID;nsid=251128697;c=1733384688285), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T07:44:50,329 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8c39fdacd3032907 with lease ID 0x1c983cb96864ed91: Processing first storage report for DS-e50dc7dc-4852-4370-b15e-2f62ccd9ce22 from datanode DatanodeRegistration(127.0.0.1:40881, datanodeUuid=9c569f87-685a-4eae-afaf-5e046b9abb72, infoPort=34113, infoSecurePort=0, ipcPort=44473, storageInfo=lv=-57;cid=testClusterID;nsid=251128697;c=1733384688285) 2024-12-05T07:44:50,329 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c39fdacd3032907 with lease ID 0x1c983cb96864ed91: from storage DS-e50dc7dc-4852-4370-b15e-2f62ccd9ce22 node DatanodeRegistration(127.0.0.1:40881, datanodeUuid=9c569f87-685a-4eae-afaf-5e046b9abb72, infoPort=34113, infoSecurePort=0, ipcPort=44473, storageInfo=lv=-57;cid=testClusterID;nsid=251128697;c=1733384688285), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-05T07:44:50,376 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950 2024-12-05T07:44:50,378 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/cluster_31b7b7bd-a015-2a30-f4f5-858b91d603d0/zookeeper_0, clientPort=56053, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/cluster_31b7b7bd-a015-2a30-f4f5-858b91d603d0/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/cluster_31b7b7bd-a015-2a30-f4f5-858b91d603d0/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-05T07:44:50,379 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56053 2024-12-05T07:44:50,380 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:50,382 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:50,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741825_1001 (size=7) 2024-12-05T07:44:50,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741825_1001 (size=7) 2024-12-05T07:44:50,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741825_1001 (size=7) 2024-12-05T07:44:50,397 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0 with version=8 2024-12-05T07:44:50,397 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40085/user/jenkins/test-data/a180b042-b085-8ee1-824b-b72eac456a85/hbase-staging 2024-12-05T07:44:50,399 INFO [Time-limited test {}] client.ConnectionUtils(128): master/fc6cd38557f3:0 server-side Connection retries=45 2024-12-05T07:44:50,400 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:50,401 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:50,401 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T07:44:50,401 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:50,401 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T07:44:50,401 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-05T07:44:50,401 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T07:44:50,402 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44233 2024-12-05T07:44:50,404 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44233 connecting to ZooKeeper ensemble=127.0.0.1:56053 2024-12-05T07:44:50,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:442330x0, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T07:44:50,452 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44233-0x101a5bced8f0000 connected 2024-12-05T07:44:50,515 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:50,517 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:50,519 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T07:44:50,519 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0, hbase.cluster.distributed=false 2024-12-05T07:44:50,522 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T07:44:50,527 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44233 2024-12-05T07:44:50,527 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44233 2024-12-05T07:44:50,527 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44233 2024-12-05T07:44:50,527 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44233 2024-12-05T07:44:50,528 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44233 2024-12-05T07:44:50,542 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fc6cd38557f3:0 server-side Connection retries=45 2024-12-05T07:44:50,542 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:50,542 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:50,542 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T07:44:50,542 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:50,542 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T07:44:50,542 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T07:44:50,543 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T07:44:50,543 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42055 2024-12-05T07:44:50,544 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42055 connecting to ZooKeeper ensemble=127.0.0.1:56053 2024-12-05T07:44:50,545 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:50,547 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:50,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:420550x0, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T07:44:50,557 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:420550x0, quorum=127.0.0.1:56053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T07:44:50,557 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42055-0x101a5bced8f0001 connected 2024-12-05T07:44:50,557 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T07:44:50,562 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T07:44:50,562 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42055-0x101a5bced8f0001, quorum=127.0.0.1:56053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T07:44:50,564 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42055-0x101a5bced8f0001, quorum=127.0.0.1:56053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T07:44:50,564 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42055 2024-12-05T07:44:50,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42055 2024-12-05T07:44:50,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42055 2024-12-05T07:44:50,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42055 2024-12-05T07:44:50,566 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42055 2024-12-05T07:44:50,585 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fc6cd38557f3:0 server-side Connection retries=45 2024-12-05T07:44:50,586 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:50,586 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:50,586 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T07:44:50,586 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:50,586 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T07:44:50,586 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T07:44:50,586 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T07:44:50,587 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37385 2024-12-05T07:44:50,589 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37385 connecting to ZooKeeper ensemble=127.0.0.1:56053 2024-12-05T07:44:50,590 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:50,591 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:50,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:373850x0, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T07:44:50,605 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:373850x0, quorum=127.0.0.1:56053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T07:44:50,605 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37385-0x101a5bced8f0002 connected 2024-12-05T07:44:50,606 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T07:44:50,606 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T07:44:50,607 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37385-0x101a5bced8f0002, quorum=127.0.0.1:56053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T07:44:50,608 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37385-0x101a5bced8f0002, quorum=127.0.0.1:56053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T07:44:50,610 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37385 2024-12-05T07:44:50,611 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37385 2024-12-05T07:44:50,611 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37385 2024-12-05T07:44:50,611 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37385 2024-12-05T07:44:50,611 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37385 2024-12-05T07:44:50,626 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fc6cd38557f3:0 server-side Connection retries=45 2024-12-05T07:44:50,626 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:50,626 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:50,626 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T07:44:50,626 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T07:44:50,626 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T07:44:50,626 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T07:44:50,627 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T07:44:50,627 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42825 2024-12-05T07:44:50,629 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42825 connecting to ZooKeeper ensemble=127.0.0.1:56053 2024-12-05T07:44:50,630 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:50,632 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:50,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:428250x0, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T07:44:50,647 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42825-0x101a5bced8f0003 connected 2024-12-05T07:44:50,648 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T07:44:50,648 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T07:44:50,649 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T07:44:50,650 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T07:44:50,652 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T07:44:50,654 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42825 2024-12-05T07:44:50,661 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42825 2024-12-05T07:44:50,661 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42825 2024-12-05T07:44:50,663 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42825 2024-12-05T07:44:50,663 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42825 2024-12-05T07:44:50,679 DEBUG [M:0;fc6cd38557f3:44233 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;fc6cd38557f3:44233 2024-12-05T07:44:50,679 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/fc6cd38557f3,44233,1733384690399 2024-12-05T07:44:50,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42055-0x101a5bced8f0001, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T07:44:50,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37385-0x101a5bced8f0002, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T07:44:50,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T07:44:50,690 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/fc6cd38557f3,44233,1733384690399 2024-12-05T07:44:50,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T07:44:50,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:50,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42055-0x101a5bced8f0001, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T07:44:50,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T07:44:50,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37385-0x101a5bced8f0002, 
quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T07:44:50,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42055-0x101a5bced8f0001, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:50,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:50,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37385-0x101a5bced8f0002, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:50,699 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T07:44:50,701 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/fc6cd38557f3,44233,1733384690399 from backup master directory 2024-12-05T07:44:50,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T07:44:50,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/fc6cd38557f3,44233,1733384690399 2024-12-05T07:44:50,706 WARN [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
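The NodeCreated and NodeChildrenChanged lines surrounding the backup-master registration are ordinary ZooKeeper watch callbacks delivered to HBase's ZKWatcher. A minimal sketch of the same kind of watch using the stock org.apache.zookeeper client follows (HBase's ZKWatcher/ZKUtil wrappers are not reproduced; the quorum string and znode paths are taken from the log, everything else is illustrative).

    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Default watcher: prints events in roughly the shape of the ZKWatcher(609) lines above.
        Watcher printer = event ->
            System.out.println("Received ZooKeeper Event, type=" + event.getType()
                + ", state=" + event.getState() + ", path=" + event.getPath());
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56053", 30000, printer); // ensemble/port from the log
        zk.exists("/hbase/master", true);              // one-shot watch: fires NodeCreated when the master registers
        zk.getChildren("/hbase/backup-masters", true); // children watch: fires NodeChildrenChanged on (de)registration
        Thread.sleep(5000);                            // toy wait so the callbacks can arrive
        zk.close();
      }
    }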
2024-12-05T07:44:50,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T07:44:50,706 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=fc6cd38557f3,44233,1733384690399 2024-12-05T07:44:50,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37385-0x101a5bced8f0002, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T07:44:50,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42055-0x101a5bced8f0001, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T07:44:50,713 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/hbase.id] with ID: 17d6b223-5cb5-4437-80c1-e3d566d41f98 2024-12-05T07:44:50,713 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/.tmp/hbase.id 2024-12-05T07:44:50,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741826_1002 (size=42) 2024-12-05T07:44:50,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741826_1002 (size=42) 2024-12-05T07:44:50,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741826_1002 (size=42) 2024-12-05T07:44:50,725 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/.tmp/hbase.id]:[hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/hbase.id] 2024-12-05T07:44:50,743 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T07:44:50,743 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-05T07:44:50,745 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
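The cluster ID entries above describe a write-to-temporary-location-then-rename sequence on HDFS for hbase.id. A minimal sketch of that pattern with the plain Hadoop FileSystem API is shown below; the paths and UUID are illustrative, and the actual FSUtils helper serializes the ID differently, so this only illustrates the temp-then-rename step.

    import java.io.IOException;
    import java.util.UUID;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:36391");  // NameNode address from the log
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");
        Path dst = new Path("/user/jenkins/test-data/hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {  // "Write the cluster ID file to a temporary location"
          out.writeUTF(UUID.randomUUID().toString());
        }
        if (!fs.rename(tmp, dst)) {                            // "Move the temporary cluster ID file to its target location"
          throw new IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }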
2024-12-05T07:44:50,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42055-0x101a5bced8f0001, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:50,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37385-0x101a5bced8f0002, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:50,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:50,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:50,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741827_1003 (size=196) 2024-12-05T07:44:50,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741827_1003 (size=196) 2024-12-05T07:44:50,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741827_1003 (size=196) 2024-12-05T07:44:50,775 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T07:44:50,777 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T07:44:50,777 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T07:44:50,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is 
added to blk_1073741828_1004 (size=1189) 2024-12-05T07:44:50,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741828_1004 (size=1189) 2024-12-05T07:44:50,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741828_1004 (size=1189) 2024-12-05T07:44:50,803 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/data/master/store 2024-12-05T07:44:50,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741829_1005 (size=34) 2024-12-05T07:44:50,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741829_1005 (size=34) 2024-12-05T07:44:50,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741829_1005 (size=34) 2024-12-05T07:44:50,818 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T07:44:50,818 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T07:44:50,818 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T07:44:50,818 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
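[Editor's note: the master:store descriptor dumped above (column families info, proc, rs, state with their VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING and BLOCKSIZE attributes) belongs to an internal table, but the same attributes can be expressed with the public HBase 2.x builder API. The sketch below rebuilds the 'info' and 'proc' families from the logged values purely as an illustration; the table name used is a placeholder, and this is not how the master region itself is created.]

```java
// Illustration only: express the logged column-family attributes with the public
// HBase client builders. Values (VERSIONS, BLOOMFILTER, IN_MEMORY, encoding, block size)
// are copied from the log; the table name "demo_store_like" is a placeholder.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static void main(String[] args) {
    // 'info': VERSIONS=3, ROWCOL bloom, IN_MEMORY, ROW_INDEX_V1 encoding, 8 KB blocks
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .build();

    // 'proc': VERSIONS=1, ROW bloom, not in-memory, no encoding, 64 KB blocks
    // ('rs' and 'state' in the log use the same settings as 'proc')
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setInMemory(false)
        .setDataBlockEncoding(DataBlockEncoding.NONE)
        .setBlocksize(64 * 1024)
        .build();

    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_store_like"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();

    System.out.println(td);
  }
}
```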
2024-12-05T07:44:50,818 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T07:44:50,818 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T07:44:50,818 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T07:44:50,818 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733384690818Disabling compacts and flushes for region at 1733384690818Disabling writes for close at 1733384690818Writing region close event to WAL at 1733384690818Closed at 1733384690818 2024-12-05T07:44:50,819 WARN [master/fc6cd38557f3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/data/master/store/.initializing 2024-12-05T07:44:50,819 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/WALs/fc6cd38557f3,44233,1733384690399 2024-12-05T07:44:50,824 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fc6cd38557f3%2C44233%2C1733384690399, suffix=, logDir=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/WALs/fc6cd38557f3,44233,1733384690399, archiveDir=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/oldWALs, maxLogs=10 2024-12-05T07:44:50,825 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor fc6cd38557f3%2C44233%2C1733384690399.1733384690824 2024-12-05T07:44:50,836 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/WALs/fc6cd38557f3,44233,1733384690399/fc6cd38557f3%2C44233%2C1733384690399.1733384690824 2024-12-05T07:44:50,843 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41273:41273),(127.0.0.1/127.0.0.1:35747:35747),(127.0.0.1/127.0.0.1:34113:34113)] 2024-12-05T07:44:50,844 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T07:44:50,845 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T07:44:50,845 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:50,845 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:50,851 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:50,853 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T07:44:50,853 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:50,854 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T07:44:50,854 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:50,856 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T07:44:50,856 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:50,857 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T07:44:50,857 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:50,860 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T07:44:50,861 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:50,862 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T07:44:50,862 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:50,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T07:44:50,865 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:50,866 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T07:44:50,866 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:50,867 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:50,868 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:50,869 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:50,869 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:50,870 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T07:44:50,872 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T07:44:50,875 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T07:44:50,875 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70745945, jitterRate=0.05419673025608063}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T07:44:50,876 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733384690845Initializing all the Stores at 1733384690848 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733384690848Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733384690850 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733384690850Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733384690850Cleaning up temporary data from old regions at 1733384690869 (+19 ms)Region opened successfully at 1733384690876 (+7 ms) 2024-12-05T07:44:50,877 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T07:44:50,882 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f91161e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fc6cd38557f3/172.17.0.2:0 2024-12-05T07:44:50,883 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-05T07:44:50,883 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T07:44:50,883 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T07:44:50,884 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T07:44:50,885 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-05T07:44:50,885 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-05T07:44:50,885 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T07:44:50,888 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
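[Editor's note: the ProcedureExecutor entry above reports 5 core workers with a max (burst) worker count of 50, i.e. a pool that keeps a small number of threads alive and can temporarily grow under load. Purely as a plain-JDK analogy (this is not HBase's worker implementation), the same shape can be sketched with a ThreadPoolExecutor using the sizes from that log line.]

```java
// Rough plain-JDK analogy (not HBase's ProcedureExecutor): a pool with a small core
// and a larger "burst" maximum, mirroring the "5 core workers ... max (burst) worker
// count=50" figures from the log. Keep-alive time is an arbitrary example value.
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BurstWorkerPoolSketch {
  public static void main(String[] args) throws InterruptedException {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        5,                          // core workers
        50,                         // burst (maximum) workers
        60, TimeUnit.SECONDS,       // how long an idle extra worker survives
        new SynchronousQueue<>());  // hand-off queue: bursts spawn extra threads up to max instead of queueing
    pool.allowCoreThreadTimeOut(true); // let even core threads die when idle

    for (int i = 0; i < 20; i++) {
      final int task = i;
      pool.execute(() ->
          System.out.println("task " + task + " on " + Thread.currentThread().getName()));
    }
    pool.shutdown();
    pool.awaitTermination(10, TimeUnit.SECONDS);
  }
}
```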
2024-12-05T07:44:50,889 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-05T07:44:50,906 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T07:44:50,907 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T07:44:50,908 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T07:44:50,914 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T07:44:50,915 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T07:44:50,916 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T07:44:50,922 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T07:44:50,924 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T07:44:50,931 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T07:44:50,934 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T07:44:50,939 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T07:44:50,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T07:44:50,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T07:44:50,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37385-0x101a5bced8f0002, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T07:44:50,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-05T07:44:50,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42055-0x101a5bced8f0001, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T07:44:50,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37385-0x101a5bced8f0002, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:50,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:50,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42055-0x101a5bced8f0001, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:50,950 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=fc6cd38557f3,44233,1733384690399, sessionid=0x101a5bced8f0000, setting cluster-up flag (Was=false) 2024-12-05T07:44:50,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37385-0x101a5bced8f0002, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:50,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42055-0x101a5bced8f0001, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:50,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:50,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:50,989 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T07:44:50,991 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=fc6cd38557f3,44233,1733384690399 2024-12-05T07:44:51,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42055-0x101a5bced8f0001, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:51,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:51,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37385-0x101a5bced8f0002, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:51,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:51,021 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-05T07:44:51,029 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T07:44:51,031 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-05T07:44:51,032 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=fc6cd38557f3,44233,1733384690399 2024-12-05T07:44:51,034 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-05T07:44:51,037 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T07:44:51,037 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T07:44:51,037 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
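[Editor's note: the BaseLoadBalancer and StochasticLoadBalancer entries above report slop=0.2, maxSteps=1000000, stepsPerRegion=800 and maxRunningTime=30000. Those figures are normally driven by site configuration; the sketch below sets what I recall the corresponding keys to be (hbase.regions.slop and the hbase.master.balancer.stochastic.* family). Treat the exact key names as assumptions to verify against the hbase-default.xml of your release, since the log itself does not name them.]

```java
// Sketch only: configuration keys that (to the best of my recollection) correspond to
// the balancer figures in the log. Verify key names against your release before use.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.regions.slop", 0.2f);                                // slop=0.2
    conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);    // maxSteps=1000000
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);      // stepsPerRegion=800
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L); // maxRunningTime=30000 ms
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);   // runMaxSteps=false
    System.out.println("slop=" + conf.getFloat("hbase.regions.slop", -1f));
  }
}
```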
2024-12-05T07:44:51,038 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: fc6cd38557f3,44233,1733384690399 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T07:44:51,039 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/fc6cd38557f3:0, corePoolSize=5, maxPoolSize=5 2024-12-05T07:44:51,039 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/fc6cd38557f3:0, corePoolSize=5, maxPoolSize=5 2024-12-05T07:44:51,039 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/fc6cd38557f3:0, corePoolSize=5, maxPoolSize=5 2024-12-05T07:44:51,040 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/fc6cd38557f3:0, corePoolSize=5, maxPoolSize=5 2024-12-05T07:44:51,040 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/fc6cd38557f3:0, corePoolSize=10, maxPoolSize=10 2024-12-05T07:44:51,040 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,040 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/fc6cd38557f3:0, corePoolSize=2, maxPoolSize=2 2024-12-05T07:44:51,040 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,042 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T07:44:51,043 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-05T07:44:51,044 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:51,045 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T07:44:51,045 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733384721045 2024-12-05T07:44:51,046 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-05T07:44:51,046 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T07:44:51,046 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-05T07:44:51,046 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T07:44:51,046 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T07:44:51,046 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-05T07:44:51,046 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
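[Editor's note: the ChoreService entries above schedule periodic cleaners such as LogsCleaner every 600000 ms. As a plain-JDK illustration of that pattern only (this is not HBase's ChoreService/ScheduledChore), a fixed-rate task on a ScheduledExecutorService looks like the sketch below, using the period from the log; the task body is a stand-in print statement.]

```java
// Plain-JDK illustration of a periodic "chore" (not HBase's ChoreService): run a
// cleanup task at a fixed period, here 600000 ms as reported for LogsCleaner.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class LogsCleanerChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();

    Runnable logsCleaner = () ->
        System.out.println("chore LogsCleaner: scanning old WAL directory for expired files...");

    // period=600000 ms, matching "ScheduledChore name=LogsCleaner, period=600000" in the log
    chorePool.scheduleAtFixedRate(logsCleaner, 0, 600_000, TimeUnit.MILLISECONDS);

    Thread.sleep(2_000);   // let the first run fire in this toy example
    chorePool.shutdownNow();
  }
}
```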
2024-12-05T07:44:51,047 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-05T07:44:51,047 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-05T07:44:51,047 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-05T07:44:51,048 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-05T07:44:51,048 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-05T07:44:51,048 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/fc6cd38557f3:0:becomeActiveMaster-HFileCleaner.large.0-1733384691048,5,FailOnTimeoutGroup] 2024-12-05T07:44:51,048 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/fc6cd38557f3:0:becomeActiveMaster-HFileCleaner.small.0-1733384691048,5,FailOnTimeoutGroup] 2024-12-05T07:44:51,048 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,048 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-05T07:44:51,048 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,048 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
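[Editor's note: the HMaster entry above says that reopening regions with a very high storeFileRefCount is disabled and that providing a threshold value > 0 for hbase.regions.recovery.store.file.ref.count enables it. The property name is quoted verbatim from that log line; the threshold value 3 below is only an example, not a recommendation.]

```java
// Enabling the feature the log reports as disabled. The key name is taken verbatim
// from the log line above; the value 3 is an arbitrary example threshold.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 3); // any value > 0 enables it per the log
    System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
  }
}
```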
2024-12-05T07:44:51,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741831_1007 (size=1321) 2024-12-05T07:44:51,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741831_1007 (size=1321) 2024-12-05T07:44:51,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741831_1007 (size=1321) 2024-12-05T07:44:51,063 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-05T07:44:51,063 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0 2024-12-05T07:44:51,065 INFO [RS:1;fc6cd38557f3:37385 {}] regionserver.HRegionServer(746): ClusterId : 17d6b223-5cb5-4437-80c1-e3d566d41f98 2024-12-05T07:44:51,066 DEBUG [RS:1;fc6cd38557f3:37385 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T07:44:51,066 INFO [RS:0;fc6cd38557f3:42055 {}] regionserver.HRegionServer(746): ClusterId : 17d6b223-5cb5-4437-80c1-e3d566d41f98 2024-12-05T07:44:51,066 DEBUG [RS:0;fc6cd38557f3:42055 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T07:44:51,066 INFO [RS:2;fc6cd38557f3:42825 {}] regionserver.HRegionServer(746): ClusterId : 17d6b223-5cb5-4437-80c1-e3d566d41f98 2024-12-05T07:44:51,066 DEBUG [RS:2;fc6cd38557f3:42825 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T07:44:51,081 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T07:44:51,082 DEBUG [RS:1;fc6cd38557f3:37385 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T07:44:51,082 DEBUG [RS:0;fc6cd38557f3:42055 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T07:44:51,082 DEBUG [RS:1;fc6cd38557f3:37385 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T07:44:51,082 DEBUG [RS:0;fc6cd38557f3:42055 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T07:44:51,082 DEBUG [RS:2;fc6cd38557f3:42825 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T07:44:51,082 DEBUG [RS:2;fc6cd38557f3:42825 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T07:44:51,083 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-05T07:44:51,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741832_1008 (size=32) 2024-12-05T07:44:51,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741832_1008 (size=32) 2024-12-05T07:44:51,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741832_1008 (size=32) 2024-12-05T07:44:51,090 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T07:44:51,092 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T07:44:51,093 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T07:44:51,094 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:51,094 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T07:44:51,094 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T07:44:51,096 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T07:44:51,096 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:51,096 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T07:44:51,096 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T07:44:51,098 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T07:44:51,098 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:51,100 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T07:44:51,100 DEBUG [RS:1;fc6cd38557f3:37385 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T07:44:51,100 DEBUG [RS:2;fc6cd38557f3:42825 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T07:44:51,100 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T07:44:51,100 DEBUG [RS:2;fc6cd38557f3:42825 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69f037b5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fc6cd38557f3/172.17.0.2:0 2024-12-05T07:44:51,100 DEBUG [RS:1;fc6cd38557f3:37385 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@720246ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fc6cd38557f3/172.17.0.2:0 2024-12-05T07:44:51,101 DEBUG [RS:0;fc6cd38557f3:42055 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T07:44:51,101 DEBUG [RS:0;fc6cd38557f3:42055 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@290d45c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fc6cd38557f3/172.17.0.2:0 2024-12-05T07:44:51,102 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T07:44:51,102 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:51,103 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T07:44:51,103 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T07:44:51,105 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740 2024-12-05T07:44:51,105 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740 2024-12-05T07:44:51,108 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T07:44:51,108 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T07:44:51,109 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families 
(32.0 M)) instead. 2024-12-05T07:44:51,111 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T07:44:51,115 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T07:44:51,116 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60136225, jitterRate=-0.10390041768550873}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T07:44:51,117 DEBUG [RS:0;fc6cd38557f3:42055 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;fc6cd38557f3:42055 2024-12-05T07:44:51,117 DEBUG [RS:1;fc6cd38557f3:37385 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;fc6cd38557f3:37385 2024-12-05T07:44:51,117 INFO [RS:1;fc6cd38557f3:37385 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T07:44:51,117 INFO [RS:0;fc6cd38557f3:42055 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T07:44:51,117 INFO [RS:1;fc6cd38557f3:37385 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T07:44:51,117 INFO [RS:0;fc6cd38557f3:42055 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T07:44:51,117 DEBUG [RS:1;fc6cd38557f3:37385 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-05T07:44:51,117 DEBUG [RS:0;fc6cd38557f3:42055 {}] regionserver.HRegionServer(832): About to register with Master. 
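[Editor's note: the split-policy line above prints desiredMaxFileSize=60136225 with jitterRate=-0.10390041768550873 but not the base size it was derived from. The logged numbers are consistent with a 64 MiB base (67108864 bytes) with the jitter applied; that base is an inference, not something the log states, so the arithmetic below is a hedged reconstruction rather than a quote of the policy's code.]

```java
// Worked arithmetic for the split-policy figures in the log. The 64 MiB base is an
// inference; jitterRate and the expected result (60136225) are copied from the log.
public class SplitSizeJitterSketch {
  public static void main(String[] args) {
    long assumedBase = 64L * 1024 * 1024;         // 67108864 bytes (inferred, not logged)
    double jitterRate = -0.10390041768550873;     // from the log
    long desiredMaxFileSize = Math.round(assumedBase * (1.0 + jitterRate));
    System.out.println(desiredMaxFileSize);       // 60136225, matching the logged value
  }
}
```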
2024-12-05T07:44:51,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733384691090Initializing all the Stores at 1733384691091 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733384691091Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733384691092 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733384691092Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733384691092Cleaning up temporary data from old regions at 1733384691108 (+16 ms)Region opened successfully at 1733384691117 (+9 ms) 2024-12-05T07:44:51,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T07:44:51,117 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T07:44:51,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T07:44:51,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T07:44:51,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T07:44:51,118 INFO [RS:1;fc6cd38557f3:37385 {}] regionserver.HRegionServer(2659): reportForDuty to master=fc6cd38557f3,44233,1733384690399 with port=37385, startcode=1733384690585 2024-12-05T07:44:51,118 INFO [RS:0;fc6cd38557f3:42055 {}] regionserver.HRegionServer(2659): reportForDuty to master=fc6cd38557f3,44233,1733384690399 with port=42055, startcode=1733384690542 2024-12-05T07:44:51,118 DEBUG [RS:1;fc6cd38557f3:37385 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T07:44:51,118 DEBUG [RS:2;fc6cd38557f3:42825 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;fc6cd38557f3:42825 2024-12-05T07:44:51,118 INFO [RS:2;fc6cd38557f3:42825 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T07:44:51,118 INFO [RS:2;fc6cd38557f3:42825 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T07:44:51,118 DEBUG [RS:2;fc6cd38557f3:42825 {}] 
regionserver.HRegionServer(832): About to register with Master. 2024-12-05T07:44:51,118 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T07:44:51,118 DEBUG [RS:0;fc6cd38557f3:42055 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T07:44:51,119 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733384691117Disabling compacts and flushes for region at 1733384691117Disabling writes for close at 1733384691117Writing region close event to WAL at 1733384691118 (+1 ms)Closed at 1733384691118 2024-12-05T07:44:51,119 INFO [RS:2;fc6cd38557f3:42825 {}] regionserver.HRegionServer(2659): reportForDuty to master=fc6cd38557f3,44233,1733384690399 with port=42825, startcode=1733384690626 2024-12-05T07:44:51,119 DEBUG [RS:2;fc6cd38557f3:42825 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T07:44:51,121 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41155, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T07:44:51,121 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53345, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T07:44:51,121 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60339, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T07:44:51,122 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T07:44:51,122 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-05T07:44:51,122 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-05T07:44:51,122 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44233 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fc6cd38557f3,37385,1733384690585 2024-12-05T07:44:51,122 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44233 {}] master.ServerManager(517): Registering regionserver=fc6cd38557f3,37385,1733384690585 2024-12-05T07:44:51,124 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T07:44:51,124 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44233 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fc6cd38557f3,42825,1733384690626 2024-12-05T07:44:51,124 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44233 {}] master.ServerManager(517): Registering regionserver=fc6cd38557f3,42825,1733384690626 2024-12-05T07:44:51,125 DEBUG [RS:1;fc6cd38557f3:37385 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0 2024-12-05T07:44:51,125 DEBUG 
[RS:1;fc6cd38557f3:37385 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36391 2024-12-05T07:44:51,125 DEBUG [RS:1;fc6cd38557f3:37385 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T07:44:51,126 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-05T07:44:51,126 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44233 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fc6cd38557f3,42055,1733384690542 2024-12-05T07:44:51,126 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44233 {}] master.ServerManager(517): Registering regionserver=fc6cd38557f3,42055,1733384690542 2024-12-05T07:44:51,127 DEBUG [RS:2;fc6cd38557f3:42825 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0 2024-12-05T07:44:51,127 DEBUG [RS:2;fc6cd38557f3:42825 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36391 2024-12-05T07:44:51,127 DEBUG [RS:2;fc6cd38557f3:42825 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T07:44:51,128 DEBUG [RS:0;fc6cd38557f3:42055 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0 2024-12-05T07:44:51,128 DEBUG [RS:0;fc6cd38557f3:42055 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36391 2024-12-05T07:44:51,128 DEBUG [RS:0;fc6cd38557f3:42055 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T07:44:51,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T07:44:51,161 DEBUG [RS:1;fc6cd38557f3:37385 {}] zookeeper.ZKUtil(111): regionserver:37385-0x101a5bced8f0002, quorum=127.0.0.1:56053, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fc6cd38557f3,37385,1733384690585 2024-12-05T07:44:51,161 WARN [RS:1;fc6cd38557f3:37385 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-05T07:44:51,161 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fc6cd38557f3,42055,1733384690542] 2024-12-05T07:44:51,161 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fc6cd38557f3,37385,1733384690585] 2024-12-05T07:44:51,161 INFO [RS:1;fc6cd38557f3:37385 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T07:44:51,161 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fc6cd38557f3,42825,1733384690626] 2024-12-05T07:44:51,161 DEBUG [RS:2;fc6cd38557f3:42825 {}] zookeeper.ZKUtil(111): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fc6cd38557f3,42825,1733384690626 2024-12-05T07:44:51,161 DEBUG [RS:1;fc6cd38557f3:37385 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/WALs/fc6cd38557f3,37385,1733384690585 2024-12-05T07:44:51,161 DEBUG [RS:0;fc6cd38557f3:42055 {}] zookeeper.ZKUtil(111): regionserver:42055-0x101a5bced8f0001, quorum=127.0.0.1:56053, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fc6cd38557f3,42055,1733384690542 2024-12-05T07:44:51,161 WARN [RS:2;fc6cd38557f3:42825 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T07:44:51,161 WARN [RS:0;fc6cd38557f3:42055 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T07:44:51,161 INFO [RS:2;fc6cd38557f3:42825 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T07:44:51,162 INFO [RS:0;fc6cd38557f3:42055 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T07:44:51,162 DEBUG [RS:2;fc6cd38557f3:42825 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/WALs/fc6cd38557f3,42825,1733384690626 2024-12-05T07:44:51,162 DEBUG [RS:0;fc6cd38557f3:42055 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/WALs/fc6cd38557f3,42055,1733384690542 2024-12-05T07:44:51,166 INFO [RS:1;fc6cd38557f3:37385 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T07:44:51,166 INFO [RS:2;fc6cd38557f3:42825 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T07:44:51,166 INFO [RS:0;fc6cd38557f3:42055 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T07:44:51,170 INFO [RS:1;fc6cd38557f3:37385 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T07:44:51,170 INFO [RS:2;fc6cd38557f3:42825 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T07:44:51,170 INFO [RS:1;fc6cd38557f3:37385 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 
MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T07:44:51,170 INFO [RS:1;fc6cd38557f3:37385 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,170 INFO [RS:2;fc6cd38557f3:42825 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T07:44:51,170 INFO [RS:2;fc6cd38557f3:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,172 INFO [RS:1;fc6cd38557f3:37385 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T07:44:51,172 INFO [RS:2;fc6cd38557f3:42825 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T07:44:51,173 INFO [RS:2;fc6cd38557f3:42825 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T07:44:51,173 INFO [RS:2;fc6cd38557f3:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,173 DEBUG [RS:2;fc6cd38557f3:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,173 DEBUG [RS:2;fc6cd38557f3:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,173 DEBUG [RS:2;fc6cd38557f3:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,174 DEBUG [RS:2;fc6cd38557f3:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,174 DEBUG [RS:2;fc6cd38557f3:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,174 DEBUG [RS:2;fc6cd38557f3:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fc6cd38557f3:0, corePoolSize=2, maxPoolSize=2 2024-12-05T07:44:51,174 DEBUG [RS:2;fc6cd38557f3:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,174 DEBUG [RS:2;fc6cd38557f3:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,174 DEBUG [RS:2;fc6cd38557f3:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,174 DEBUG [RS:2;fc6cd38557f3:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,174 DEBUG [RS:2;fc6cd38557f3:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,174 INFO [RS:1;fc6cd38557f3:37385 {}] 
hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T07:44:51,174 DEBUG [RS:2;fc6cd38557f3:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,174 DEBUG [RS:2;fc6cd38557f3:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fc6cd38557f3:0, corePoolSize=3, maxPoolSize=3 2024-12-05T07:44:51,174 INFO [RS:0;fc6cd38557f3:42055 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T07:44:51,174 INFO [RS:1;fc6cd38557f3:37385 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,174 DEBUG [RS:2;fc6cd38557f3:42825 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0, corePoolSize=3, maxPoolSize=3 2024-12-05T07:44:51,175 DEBUG [RS:1;fc6cd38557f3:37385 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,175 DEBUG [RS:1;fc6cd38557f3:37385 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,175 DEBUG [RS:1;fc6cd38557f3:37385 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,175 DEBUG [RS:1;fc6cd38557f3:37385 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,175 DEBUG [RS:1;fc6cd38557f3:37385 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,175 DEBUG [RS:1;fc6cd38557f3:37385 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fc6cd38557f3:0, corePoolSize=2, maxPoolSize=2 2024-12-05T07:44:51,175 DEBUG [RS:1;fc6cd38557f3:37385 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,175 DEBUG [RS:1;fc6cd38557f3:37385 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,175 DEBUG [RS:1;fc6cd38557f3:37385 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,175 DEBUG [RS:1;fc6cd38557f3:37385 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,175 DEBUG [RS:1;fc6cd38557f3:37385 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,175 DEBUG [RS:1;fc6cd38557f3:37385 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,175 DEBUG [RS:1;fc6cd38557f3:37385 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SNAPSHOT_OPERATIONS-regionserver/fc6cd38557f3:0, corePoolSize=3, maxPoolSize=3 2024-12-05T07:44:51,175 DEBUG [RS:1;fc6cd38557f3:37385 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0, corePoolSize=3, maxPoolSize=3 2024-12-05T07:44:51,183 INFO [RS:0;fc6cd38557f3:42055 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T07:44:51,183 INFO [RS:0;fc6cd38557f3:42055 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,187 INFO [RS:0;fc6cd38557f3:42055 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T07:44:51,187 INFO [RS:2;fc6cd38557f3:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,187 INFO [RS:2;fc6cd38557f3:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,187 INFO [RS:2;fc6cd38557f3:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,187 INFO [RS:2;fc6cd38557f3:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,187 INFO [RS:2;fc6cd38557f3:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,187 INFO [RS:2;fc6cd38557f3:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,42825,1733384690626-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T07:44:51,189 INFO [RS:1;fc6cd38557f3:37385 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,189 INFO [RS:1;fc6cd38557f3:37385 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,189 INFO [RS:1;fc6cd38557f3:37385 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,189 INFO [RS:1;fc6cd38557f3:37385 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,189 INFO [RS:1;fc6cd38557f3:37385 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,189 INFO [RS:1;fc6cd38557f3:37385 {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,37385,1733384690585-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T07:44:51,191 INFO [RS:0;fc6cd38557f3:42055 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T07:44:51,191 INFO [RS:0;fc6cd38557f3:42055 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-05T07:44:51,191 DEBUG [RS:0;fc6cd38557f3:42055 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,191 DEBUG [RS:0;fc6cd38557f3:42055 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,191 DEBUG [RS:0;fc6cd38557f3:42055 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,191 DEBUG [RS:0;fc6cd38557f3:42055 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,191 DEBUG [RS:0;fc6cd38557f3:42055 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,192 DEBUG [RS:0;fc6cd38557f3:42055 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fc6cd38557f3:0, corePoolSize=2, maxPoolSize=2 2024-12-05T07:44:51,192 DEBUG [RS:0;fc6cd38557f3:42055 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,192 DEBUG [RS:0;fc6cd38557f3:42055 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,192 DEBUG [RS:0;fc6cd38557f3:42055 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,192 DEBUG [RS:0;fc6cd38557f3:42055 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,192 DEBUG [RS:0;fc6cd38557f3:42055 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,192 DEBUG [RS:0;fc6cd38557f3:42055 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fc6cd38557f3:0, corePoolSize=1, maxPoolSize=1 2024-12-05T07:44:51,192 DEBUG [RS:0;fc6cd38557f3:42055 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fc6cd38557f3:0, corePoolSize=3, maxPoolSize=3 2024-12-05T07:44:51,192 DEBUG [RS:0;fc6cd38557f3:42055 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0, corePoolSize=3, maxPoolSize=3 2024-12-05T07:44:51,198 INFO [RS:0;fc6cd38557f3:42055 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,199 INFO [RS:0;fc6cd38557f3:42055 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,199 INFO [RS:0;fc6cd38557f3:42055 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,199 INFO [RS:0;fc6cd38557f3:42055 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-05T07:44:51,199 INFO [RS:0;fc6cd38557f3:42055 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,199 INFO [RS:0;fc6cd38557f3:42055 {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,42055,1733384690542-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T07:44:51,204 INFO [RS:2;fc6cd38557f3:42825 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T07:44:51,204 INFO [RS:2;fc6cd38557f3:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,42825,1733384690626-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,204 INFO [RS:2;fc6cd38557f3:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,204 INFO [RS:2;fc6cd38557f3:42825 {}] regionserver.Replication(171): fc6cd38557f3,42825,1733384690626 started 2024-12-05T07:44:51,210 INFO [RS:1;fc6cd38557f3:37385 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T07:44:51,210 INFO [RS:1;fc6cd38557f3:37385 {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,37385,1733384690585-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,210 INFO [RS:1;fc6cd38557f3:37385 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,211 INFO [RS:1;fc6cd38557f3:37385 {}] regionserver.Replication(171): fc6cd38557f3,37385,1733384690585 started 2024-12-05T07:44:51,218 INFO [RS:2;fc6cd38557f3:42825 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T07:44:51,218 INFO [RS:2;fc6cd38557f3:42825 {}] regionserver.HRegionServer(1482): Serving as fc6cd38557f3,42825,1733384690626, RpcServer on fc6cd38557f3/172.17.0.2:42825, sessionid=0x101a5bced8f0003 2024-12-05T07:44:51,218 DEBUG [RS:2;fc6cd38557f3:42825 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T07:44:51,219 DEBUG [RS:2;fc6cd38557f3:42825 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fc6cd38557f3,42825,1733384690626 2024-12-05T07:44:51,219 DEBUG [RS:2;fc6cd38557f3:42825 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fc6cd38557f3,42825,1733384690626' 2024-12-05T07:44:51,219 DEBUG [RS:2;fc6cd38557f3:42825 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T07:44:51,220 DEBUG [RS:2;fc6cd38557f3:42825 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T07:44:51,220 DEBUG [RS:2;fc6cd38557f3:42825 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T07:44:51,220 DEBUG [RS:2;fc6cd38557f3:42825 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T07:44:51,220 DEBUG [RS:2;fc6cd38557f3:42825 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fc6cd38557f3,42825,1733384690626 2024-12-05T07:44:51,220 DEBUG [RS:2;fc6cd38557f3:42825 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fc6cd38557f3,42825,1733384690626' 2024-12-05T07:44:51,220 DEBUG [RS:2;fc6cd38557f3:42825 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T07:44:51,221 DEBUG [RS:2;fc6cd38557f3:42825 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T07:44:51,221 INFO [RS:0;fc6cd38557f3:42055 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T07:44:51,222 INFO [RS:0;fc6cd38557f3:42055 {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,42055,1733384690542-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,222 INFO [RS:0;fc6cd38557f3:42055 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,222 INFO [RS:0;fc6cd38557f3:42055 {}] regionserver.Replication(171): fc6cd38557f3,42055,1733384690542 started 2024-12-05T07:44:51,222 DEBUG [RS:2;fc6cd38557f3:42825 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T07:44:51,223 INFO [RS:2;fc6cd38557f3:42825 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T07:44:51,223 INFO [RS:2;fc6cd38557f3:42825 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T07:44:51,228 INFO [RS:1;fc6cd38557f3:37385 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T07:44:51,228 INFO [RS:1;fc6cd38557f3:37385 {}] regionserver.HRegionServer(1482): Serving as fc6cd38557f3,37385,1733384690585, RpcServer on fc6cd38557f3/172.17.0.2:37385, sessionid=0x101a5bced8f0002 2024-12-05T07:44:51,228 DEBUG [RS:1;fc6cd38557f3:37385 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T07:44:51,228 DEBUG [RS:1;fc6cd38557f3:37385 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fc6cd38557f3,37385,1733384690585 2024-12-05T07:44:51,228 DEBUG [RS:1;fc6cd38557f3:37385 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fc6cd38557f3,37385,1733384690585' 2024-12-05T07:44:51,228 DEBUG [RS:1;fc6cd38557f3:37385 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T07:44:51,229 DEBUG [RS:1;fc6cd38557f3:37385 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T07:44:51,229 DEBUG [RS:1;fc6cd38557f3:37385 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T07:44:51,229 DEBUG [RS:1;fc6cd38557f3:37385 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T07:44:51,229 DEBUG [RS:1;fc6cd38557f3:37385 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fc6cd38557f3,37385,1733384690585 2024-12-05T07:44:51,229 DEBUG [RS:1;fc6cd38557f3:37385 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fc6cd38557f3,37385,1733384690585' 2024-12-05T07:44:51,229 DEBUG [RS:1;fc6cd38557f3:37385 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T07:44:51,230 DEBUG [RS:1;fc6cd38557f3:37385 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T07:44:51,230 DEBUG [RS:1;fc6cd38557f3:37385 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T07:44:51,230 INFO [RS:1;fc6cd38557f3:37385 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T07:44:51,230 INFO [RS:1;fc6cd38557f3:37385 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T07:44:51,237 INFO [RS:0;fc6cd38557f3:42055 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T07:44:51,237 INFO [RS:0;fc6cd38557f3:42055 {}] regionserver.HRegionServer(1482): Serving as fc6cd38557f3,42055,1733384690542, RpcServer on fc6cd38557f3/172.17.0.2:42055, sessionid=0x101a5bced8f0001 2024-12-05T07:44:51,237 DEBUG [RS:0;fc6cd38557f3:42055 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T07:44:51,237 DEBUG [RS:0;fc6cd38557f3:42055 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fc6cd38557f3,42055,1733384690542 2024-12-05T07:44:51,237 DEBUG [RS:0;fc6cd38557f3:42055 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fc6cd38557f3,42055,1733384690542' 2024-12-05T07:44:51,237 DEBUG [RS:0;fc6cd38557f3:42055 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T07:44:51,238 DEBUG [RS:0;fc6cd38557f3:42055 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T07:44:51,239 DEBUG [RS:0;fc6cd38557f3:42055 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T07:44:51,239 DEBUG [RS:0;fc6cd38557f3:42055 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T07:44:51,239 DEBUG [RS:0;fc6cd38557f3:42055 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fc6cd38557f3,42055,1733384690542 2024-12-05T07:44:51,239 DEBUG [RS:0;fc6cd38557f3:42055 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fc6cd38557f3,42055,1733384690542' 2024-12-05T07:44:51,239 DEBUG [RS:0;fc6cd38557f3:42055 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T07:44:51,240 DEBUG [RS:0;fc6cd38557f3:42055 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T07:44:51,240 DEBUG [RS:0;fc6cd38557f3:42055 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T07:44:51,240 INFO [RS:0;fc6cd38557f3:42055 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T07:44:51,240 INFO [RS:0;fc6cd38557f3:42055 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T07:44:51,276 WARN [fc6cd38557f3:44233 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-05T07:44:51,326 INFO [RS:2;fc6cd38557f3:42825 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fc6cd38557f3%2C42825%2C1733384690626, suffix=, logDir=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/WALs/fc6cd38557f3,42825,1733384690626, archiveDir=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/oldWALs, maxLogs=32 2024-12-05T07:44:51,326 INFO [RS:2;fc6cd38557f3:42825 {}] monitor.StreamSlowMonitor(122): New stream slow monitor fc6cd38557f3%2C42825%2C1733384690626.1733384691326 2024-12-05T07:44:51,333 INFO [RS:1;fc6cd38557f3:37385 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fc6cd38557f3%2C37385%2C1733384690585, suffix=, logDir=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/WALs/fc6cd38557f3,37385,1733384690585, archiveDir=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/oldWALs, maxLogs=32 2024-12-05T07:44:51,333 INFO [RS:1;fc6cd38557f3:37385 {}] monitor.StreamSlowMonitor(122): New stream slow monitor fc6cd38557f3%2C37385%2C1733384690585.1733384691333 2024-12-05T07:44:51,335 INFO [RS:2;fc6cd38557f3:42825 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/WALs/fc6cd38557f3,42825,1733384690626/fc6cd38557f3%2C42825%2C1733384690626.1733384691326 2024-12-05T07:44:51,337 DEBUG [RS:2;fc6cd38557f3:42825 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41273:41273),(127.0.0.1/127.0.0.1:35747:35747),(127.0.0.1/127.0.0.1:34113:34113)] 2024-12-05T07:44:51,343 INFO [RS:0;fc6cd38557f3:42055 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fc6cd38557f3%2C42055%2C1733384690542, suffix=, logDir=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/WALs/fc6cd38557f3,42055,1733384690542, archiveDir=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/oldWALs, maxLogs=32 2024-12-05T07:44:51,344 INFO [RS:0;fc6cd38557f3:42055 {}] monitor.StreamSlowMonitor(122): New stream slow monitor fc6cd38557f3%2C42055%2C1733384690542.1733384691343 2024-12-05T07:44:51,344 INFO [RS:1;fc6cd38557f3:37385 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/WALs/fc6cd38557f3,37385,1733384690585/fc6cd38557f3%2C37385%2C1733384690585.1733384691333 2024-12-05T07:44:51,345 DEBUG [RS:1;fc6cd38557f3:37385 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41273:41273),(127.0.0.1/127.0.0.1:35747:35747),(127.0.0.1/127.0.0.1:34113:34113)] 2024-12-05T07:44:51,352 INFO [RS:0;fc6cd38557f3:42055 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/WALs/fc6cd38557f3,42055,1733384690542/fc6cd38557f3%2C42055%2C1733384690542.1733384691343 2024-12-05T07:44:51,353 DEBUG [RS:0;fc6cd38557f3:42055 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35747:35747),(127.0.0.1/127.0.0.1:41273:41273),(127.0.0.1/127.0.0.1:34113:34113)] 2024-12-05T07:44:51,527 DEBUG [fc6cd38557f3:44233 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-05T07:44:51,527 DEBUG [fc6cd38557f3:44233 {}] balancer.BalancerClusterState(204): Hosts are {fc6cd38557f3=0} racks are {/default-rack=0} 2024-12-05T07:44:51,529 DEBUG [fc6cd38557f3:44233 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T07:44:51,529 DEBUG [fc6cd38557f3:44233 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T07:44:51,529 DEBUG [fc6cd38557f3:44233 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T07:44:51,529 DEBUG [fc6cd38557f3:44233 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T07:44:51,529 DEBUG [fc6cd38557f3:44233 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T07:44:51,529 DEBUG [fc6cd38557f3:44233 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T07:44:51,529 INFO [fc6cd38557f3:44233 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T07:44:51,529 INFO [fc6cd38557f3:44233 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T07:44:51,529 INFO [fc6cd38557f3:44233 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T07:44:51,529 DEBUG [fc6cd38557f3:44233 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T07:44:51,530 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=fc6cd38557f3,42055,1733384690542 2024-12-05T07:44:51,532 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as fc6cd38557f3,42055,1733384690542, state=OPENING 2024-12-05T07:44:51,556 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-05T07:44:51,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:51,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42055-0x101a5bced8f0001, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:51,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:51,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37385-0x101a5bced8f0002, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:51,565 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T07:44:51,565 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T07:44:51,565 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T07:44:51,565 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T07:44:51,565 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, 
region=1588230740, ASSIGN 2024-12-05T07:44:51,565 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=fc6cd38557f3,42055,1733384690542}] 2024-12-05T07:44:51,719 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T07:44:51,722 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59479, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T07:44:51,727 INFO [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-05T07:44:51,727 INFO [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-05T07:44:51,731 INFO [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fc6cd38557f3%2C42055%2C1733384690542.meta, suffix=.meta, logDir=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/WALs/fc6cd38557f3,42055,1733384690542, archiveDir=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/oldWALs, maxLogs=32 2024-12-05T07:44:51,732 INFO [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor fc6cd38557f3%2C42055%2C1733384690542.meta.1733384691731.meta 2024-12-05T07:44:51,741 INFO [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/WALs/fc6cd38557f3,42055,1733384690542/fc6cd38557f3%2C42055%2C1733384690542.meta.1733384691731.meta 2024-12-05T07:44:51,741 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41273:41273),(127.0.0.1/127.0.0.1:35747:35747),(127.0.0.1/127.0.0.1:34113:34113)] 2024-12-05T07:44:51,742 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T07:44:51,742 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T07:44:51,742 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T07:44:51,743 INFO [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-05T07:44:51,743 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T07:44:51,743 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T07:44:51,743 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T07:44:51,743 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T07:44:51,744 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T07:44:51,746 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T07:44:51,746 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:51,746 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T07:44:51,746 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T07:44:51,748 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T07:44:51,748 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:51,748 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T07:44:51,749 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T07:44:51,750 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T07:44:51,750 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:51,750 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T07:44:51,750 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T07:44:51,751 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T07:44:51,752 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:51,752 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-05T07:44:51,752 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T07:44:51,753 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740 2024-12-05T07:44:51,755 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740 2024-12-05T07:44:51,757 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T07:44:51,757 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T07:44:51,758 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T07:44:51,760 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T07:44:51,761 INFO [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69553613, jitterRate=0.036429598927497864}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T07:44:51,761 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-05T07:44:51,762 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733384691743Writing region info on filesystem at 1733384691743Initializing all the Stores at 1733384691744 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733384691744Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733384691744Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733384691744Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733384691744Cleaning up temporary data from old regions at 1733384691757 (+13 ms)Running coprocessor post-open hooks at 1733384691761 (+4 ms)Region opened successfully at 1733384691762 (+1 ms) 2024-12-05T07:44:51,764 INFO [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733384691719 2024-12-05T07:44:51,767 DEBUG [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-05T07:44:51,767 INFO [RS_OPEN_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-05T07:44:51,768 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=fc6cd38557f3,42055,1733384690542 2024-12-05T07:44:51,770 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as fc6cd38557f3,42055,1733384690542, state=OPEN 2024-12-05T07:44:51,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T07:44:51,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37385-0x101a5bced8f0002, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T07:44:51,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42055-0x101a5bced8f0001, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T07:44:51,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T07:44:51,788 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=fc6cd38557f3,42055,1733384690542 2024-12-05T07:44:51,788 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T07:44:51,788 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T07:44:51,788 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T07:44:51,788 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T07:44:51,793 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-05T07:44:51,793 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=fc6cd38557f3,42055,1733384690542 in 223 msec 2024-12-05T07:44:51,797 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-05T07:44:51,797 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 671 msec 2024-12-05T07:44:51,799 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T07:44:51,800 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-05T07:44:51,801 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T07:44:51,802 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fc6cd38557f3,42055,1733384690542, seqNum=-1] 2024-12-05T07:44:51,802 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T07:44:51,808 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47423, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T07:44:51,824 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 787 msec 2024-12-05T07:44:51,824 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733384691824, completionTime=-1 2024-12-05T07:44:51,824 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-05T07:44:51,824 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-05T07:44:51,827 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-05T07:44:51,827 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733384751827 2024-12-05T07:44:51,827 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733384811827 2024-12-05T07:44:51,827 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-05T07:44:51,827 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,44233,1733384690399-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,828 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,44233,1733384690399-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,828 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,44233,1733384690399-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,828 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-fc6cd38557f3:44233, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,828 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,830 DEBUG [master/fc6cd38557f3:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T07:44:51,833 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,840 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.132sec 2024-12-05T07:44:51,840 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T07:44:51,840 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-05T07:44:51,840 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T07:44:51,840 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-05T07:44:51,841 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T07:44:51,841 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,44233,1733384690399-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-05T07:44:51,841 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,44233,1733384690399-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T07:44:51,847 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T07:44:51,847 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T07:44:51,847 INFO [master/fc6cd38557f3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fc6cd38557f3,44233,1733384690399-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T07:44:51,867 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@438ad0b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T07:44:51,867 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request fc6cd38557f3,44233,-1 for getting cluster id 2024-12-05T07:44:51,867 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T07:44:51,872 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '17d6b223-5cb5-4437-80c1-e3d566d41f98' 2024-12-05T07:44:51,872 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T07:44:51,873 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "17d6b223-5cb5-4437-80c1-e3d566d41f98" 2024-12-05T07:44:51,873 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@240bfab1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T07:44:51,873 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fc6cd38557f3,44233,-1] 2024-12-05T07:44:51,873 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T07:44:51,874 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T07:44:51,876 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53398, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T07:44:51,878 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ef9cf8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T07:44:51,878 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T07:44:51,880 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=fc6cd38557f3,42055,1733384690542, seqNum=-1] 2024-12-05T07:44:51,881 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T07:44:51,884 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44860, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T07:44:51,887 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=fc6cd38557f3,44233,1733384690399 2024-12-05T07:44:51,887 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-05T07:44:51,889 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is fc6cd38557f3,44233,1733384690399 2024-12-05T07:44:51,889 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@8243a04 2024-12-05T07:44:51,889 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-05T07:44:51,891 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53406, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-05T07:44:51,892 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44233 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T07:44:51,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44233 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-05T07:44:51,897 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T07:44:51,897 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:51,897 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44233 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-05T07:44:51,899 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T07:44:51,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44233 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T07:44:51,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741837_1013 (size=392) 
2024-12-05T07:44:51,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741837_1013 (size=392) 2024-12-05T07:44:51,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741837_1013 (size=392) 2024-12-05T07:44:51,927 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => bddefc8e3a76af8a051481a8f05635e9, NAME => 'TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0 2024-12-05T07:44:51,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741838_1014 (size=51) 2024-12-05T07:44:51,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741838_1014 (size=51) 2024-12-05T07:44:51,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741838_1014 (size=51) 2024-12-05T07:44:51,955 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T07:44:51,956 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing bddefc8e3a76af8a051481a8f05635e9, disabling compactions & flushes 2024-12-05T07:44:51,956 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9. 2024-12-05T07:44:51,956 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9. 2024-12-05T07:44:51,956 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9. after waiting 0 ms 2024-12-05T07:44:51,956 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9. 2024-12-05T07:44:51,956 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9. 
2024-12-05T07:44:51,956 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for bddefc8e3a76af8a051481a8f05635e9: Waiting for close lock at 1733384691956Disabling compacts and flushes for region at 1733384691956Disabling writes for close at 1733384691956Writing region close event to WAL at 1733384691956Closed at 1733384691956 2024-12-05T07:44:51,958 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T07:44:51,959 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733384691959"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733384691959"}]},"ts":"1733384691959"} 2024-12-05T07:44:51,964 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-05T07:44:51,966 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T07:44:51,967 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733384691966"}]},"ts":"1733384691966"} 2024-12-05T07:44:51,974 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-05T07:44:51,974 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {fc6cd38557f3=0} racks are {/default-rack=0} 2024-12-05T07:44:51,975 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T07:44:51,975 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T07:44:51,975 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T07:44:51,975 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T07:44:51,976 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T07:44:51,976 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T07:44:51,976 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T07:44:51,976 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T07:44:51,976 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T07:44:51,976 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T07:44:51,976 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=bddefc8e3a76af8a051481a8f05635e9, ASSIGN}] 2024-12-05T07:44:51,979 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=bddefc8e3a76af8a051481a8f05635e9, ASSIGN 2024-12-05T07:44:51,982 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=bddefc8e3a76af8a051481a8f05635e9, ASSIGN; state=OFFLINE, location=fc6cd38557f3,37385,1733384690585; forceNewPlan=false, retain=false 2024-12-05T07:44:52,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44233 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T07:44:52,133 INFO [fc6cd38557f3:44233 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-05T07:44:52,134 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bddefc8e3a76af8a051481a8f05635e9, regionState=OPENING, regionLocation=fc6cd38557f3,37385,1733384690585 2024-12-05T07:44:52,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=bddefc8e3a76af8a051481a8f05635e9, ASSIGN because future has completed 2024-12-05T07:44:52,139 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bddefc8e3a76af8a051481a8f05635e9, server=fc6cd38557f3,37385,1733384690585}] 2024-12-05T07:44:52,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44233 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T07:44:52,294 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T07:44:52,295 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35559, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T07:44:52,301 INFO [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9. 
2024-12-05T07:44:52,302 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => bddefc8e3a76af8a051481a8f05635e9, NAME => 'TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9.', STARTKEY => '', ENDKEY => ''} 2024-12-05T07:44:52,302 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC bddefc8e3a76af8a051481a8f05635e9 2024-12-05T07:44:52,302 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T07:44:52,303 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for bddefc8e3a76af8a051481a8f05635e9 2024-12-05T07:44:52,303 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for bddefc8e3a76af8a051481a8f05635e9 2024-12-05T07:44:52,311 INFO [StoreOpener-bddefc8e3a76af8a051481a8f05635e9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region bddefc8e3a76af8a051481a8f05635e9 2024-12-05T07:44:52,313 INFO [StoreOpener-bddefc8e3a76af8a051481a8f05635e9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bddefc8e3a76af8a051481a8f05635e9 columnFamilyName cf 2024-12-05T07:44:52,313 DEBUG [StoreOpener-bddefc8e3a76af8a051481a8f05635e9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T07:44:52,314 INFO [StoreOpener-bddefc8e3a76af8a051481a8f05635e9-1 {}] regionserver.HStore(327): Store=bddefc8e3a76af8a051481a8f05635e9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T07:44:52,315 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for bddefc8e3a76af8a051481a8f05635e9 2024-12-05T07:44:52,316 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/default/TestHBaseWalOnEC/bddefc8e3a76af8a051481a8f05635e9 2024-12-05T07:44:52,317 DEBUG 
[RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/default/TestHBaseWalOnEC/bddefc8e3a76af8a051481a8f05635e9 2024-12-05T07:44:52,317 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for bddefc8e3a76af8a051481a8f05635e9 2024-12-05T07:44:52,318 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for bddefc8e3a76af8a051481a8f05635e9 2024-12-05T07:44:52,320 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for bddefc8e3a76af8a051481a8f05635e9 2024-12-05T07:44:52,325 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/default/TestHBaseWalOnEC/bddefc8e3a76af8a051481a8f05635e9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T07:44:52,325 INFO [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened bddefc8e3a76af8a051481a8f05635e9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71119196, jitterRate=0.05975860357284546}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T07:44:52,326 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bddefc8e3a76af8a051481a8f05635e9 2024-12-05T07:44:52,326 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for bddefc8e3a76af8a051481a8f05635e9: Running coprocessor pre-open hook at 1733384692303Writing region info on filesystem at 1733384692303Initializing all the Stores at 1733384692308 (+5 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733384692308Cleaning up temporary data from old regions at 1733384692318 (+10 ms)Running coprocessor post-open hooks at 1733384692326 (+8 ms)Region opened successfully at 1733384692326 2024-12-05T07:44:52,328 INFO [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9., pid=6, masterSystemTime=1733384692293 2024-12-05T07:44:52,339 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bddefc8e3a76af8a051481a8f05635e9, regionState=OPEN, openSeqNum=2, regionLocation=fc6cd38557f3,37385,1733384690585 2024-12-05T07:44:52,341 DEBUG [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9. 
2024-12-05T07:44:52,341 INFO [RS_OPEN_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9. 2024-12-05T07:44:52,343 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bddefc8e3a76af8a051481a8f05635e9, server=fc6cd38557f3,37385,1733384690585 because future has completed 2024-12-05T07:44:52,352 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-05T07:44:52,352 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure bddefc8e3a76af8a051481a8f05635e9, server=fc6cd38557f3,37385,1733384690585 in 207 msec 2024-12-05T07:44:52,358 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-05T07:44:52,358 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=bddefc8e3a76af8a051481a8f05635e9, ASSIGN in 376 msec 2024-12-05T07:44:52,361 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T07:44:52,362 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733384692361"}]},"ts":"1733384692361"} 2024-12-05T07:44:52,366 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-05T07:44:52,368 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T07:44:52,374 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 476 msec 2024-12-05T07:44:52,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44233 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T07:44:52,528 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-05T07:44:52,528 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-05T07:44:52,529 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T07:44:52,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-05T07:44:52,533 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T07:44:52,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
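The create-table flow above (CreateTableProcedure pid=4 through OpenRegionProcedure pid=6) is driven from the client side by a single Admin call. A minimal sketch, assuming the standard HBase Java client API and a cluster reachable through hbase-site.xml; the table and column-family names are taken from the log, everything else (class name, configuration source) is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestHBaseWalOnEC");
          // REGION_REPLICATION => '1' and a single 'cf' family, as in the descriptor logged for pid=4.
          admin.createTable(TableDescriptorBuilder.newBuilder(tn)
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build());
          // createTable() returns once the CreateTableProcedure finishes, i.e. after the region
          // has been assigned and the table marked ENABLED in hbase:meta, as in the lines above.
        }
      }
    }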
2024-12-05T07:44:52,537 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9., hostname=fc6cd38557f3,37385,1733384690585, seqNum=2] 2024-12-05T07:44:52,538 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T07:44:52,543 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43974, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T07:44:52,548 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44233 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-05T07:44:52,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44233 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-05T07:44:52,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44233 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T07:44:52,554 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-05T07:44:52,556 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-05T07:44:52,556 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-05T07:44:52,651 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-05T07:44:52,652 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-05T07:44:52,654 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T07:44:52,654 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-05T07:44:52,655 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-05T07:44:52,655 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-05T07:44:52,656 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_TestHBaseWalOnEC 2024-12-05T07:44:52,656 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestHBaseWalOnEC Metrics about Tables on a single HBase RegionServer 2024-12-05T07:44:52,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44233 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T07:44:52,713 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37385 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-05T07:44:52,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9. 2024-12-05T07:44:52,714 INFO [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing bddefc8e3a76af8a051481a8f05635e9 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-05T07:44:52,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/default/TestHBaseWalOnEC/bddefc8e3a76af8a051481a8f05635e9/.tmp/cf/db73bbfcd4c34579b6cd2732e90459cd is 36, key is row/cf:cq/1733384692544/Put/seqid=0 2024-12-05T07:44:52,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741839_1015 (size=4787) 2024-12-05T07:44:52,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741839_1015 (size=4787) 2024-12-05T07:44:52,750 INFO [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/default/TestHBaseWalOnEC/bddefc8e3a76af8a051481a8f05635e9/.tmp/cf/db73bbfcd4c34579b6cd2732e90459cd 2024-12-05T07:44:52,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741839_1015 (size=4787) 2024-12-05T07:44:52,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/default/TestHBaseWalOnEC/bddefc8e3a76af8a051481a8f05635e9/.tmp/cf/db73bbfcd4c34579b6cd2732e90459cd as hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/default/TestHBaseWalOnEC/bddefc8e3a76af8a051481a8f05635e9/cf/db73bbfcd4c34579b6cd2732e90459cd 2024-12-05T07:44:52,775 INFO [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/default/TestHBaseWalOnEC/bddefc8e3a76af8a051481a8f05635e9/cf/db73bbfcd4c34579b6cd2732e90459cd, entries=1, sequenceid=5, filesize=4.7 K 2024-12-05T07:44:52,777 INFO [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 
{event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for bddefc8e3a76af8a051481a8f05635e9 in 63ms, sequenceid=5, compaction requested=false 2024-12-05T07:44:52,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for bddefc8e3a76af8a051481a8f05635e9: 2024-12-05T07:44:52,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9. 2024-12-05T07:44:52,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fc6cd38557f3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-05T07:44:52,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44233 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-05T07:44:52,785 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-05T07:44:52,785 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 224 msec 2024-12-05T07:44:52,790 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 238 msec 2024-12-05T07:44:52,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44233 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T07:44:52,869 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-05T07:44:52,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-05T07:44:52,873 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
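The flush above (FlushTableProcedure pid=7 and FlushRegionProcedure pid=8) covers a single 32-byte cell keyed row/cf:cq. A minimal client-side sketch of the same sequence, assuming the standard Table and Admin APIs; only the row, family, qualifier and table name come from the log, the cell value and wrapper class are illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlushSketch {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          // One small cell: row 'row', family 'cf', qualifier 'cq' (the key shown by the HFile writer).
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // Asks the master to flush the table, driving the FlushTableProcedure /
          // FlushRegionProcedure pair above and writing the memstore out as an HFile under
          // .../data/default/TestHBaseWalOnEC/<encoded region>/cf/.
          admin.flush(tn);
        }
      }
    }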
2024-12-05T07:44:52,874 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T07:44:52,874 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-05T07:44:52,874 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T07:44:52,874 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-05T07:44:52,874 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=457508494, stopped=false 2024-12-05T07:44:52,874 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T07:44:52,874 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=fc6cd38557f3,44233,1733384690399 2024-12-05T07:44:52,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T07:44:52,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42055-0x101a5bced8f0001, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T07:44:52,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37385-0x101a5bced8f0002, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T07:44:52,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:52,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42055-0x101a5bced8f0001, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:52,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37385-0x101a5bced8f0002, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:52,898 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T07:44:52,898 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T07:44:52,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T07:44:52,898 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at 
org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T07:44:52,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:52,898 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T07:44:52,898 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fc6cd38557f3,42055,1733384690542' ***** 2024-12-05T07:44:52,899 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T07:44:52,899 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fc6cd38557f3,37385,1733384690585' ***** 2024-12-05T07:44:52,899 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T07:44:52,899 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fc6cd38557f3,42825,1733384690626' ***** 2024-12-05T07:44:52,899 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T07:44:52,899 INFO [RS:0;fc6cd38557f3:42055 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T07:44:52,899 INFO [RS:2;fc6cd38557f3:42825 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T07:44:52,899 INFO [RS:2;fc6cd38557f3:42825 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T07:44:52,899 INFO [RS:0;fc6cd38557f3:42055 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T07:44:52,899 INFO [RS:2;fc6cd38557f3:42825 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T07:44:52,899 INFO [RS:0;fc6cd38557f3:42055 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T07:44:52,900 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T07:44:52,900 INFO [RS:0;fc6cd38557f3:42055 {}] regionserver.HRegionServer(959): stopping server fc6cd38557f3,42055,1733384690542 2024-12-05T07:44:52,900 INFO [RS:0;fc6cd38557f3:42055 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T07:44:52,900 INFO [RS:0;fc6cd38557f3:42055 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;fc6cd38557f3:42055. 2024-12-05T07:44:52,900 INFO [RS:1;fc6cd38557f3:37385 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T07:44:52,900 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T07:44:52,901 INFO [RS:1;fc6cd38557f3:37385 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-05T07:44:52,901 DEBUG [RS:0;fc6cd38557f3:42055 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T07:44:52,901 INFO [RS:1;fc6cd38557f3:37385 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T07:44:52,901 DEBUG [RS:0;fc6cd38557f3:42055 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T07:44:52,901 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T07:44:52,901 INFO [RS:1;fc6cd38557f3:37385 {}] regionserver.HRegionServer(3091): Received CLOSE for bddefc8e3a76af8a051481a8f05635e9 2024-12-05T07:44:52,899 INFO [RS:2;fc6cd38557f3:42825 {}] regionserver.HRegionServer(959): stopping server fc6cd38557f3,42825,1733384690626 2024-12-05T07:44:52,901 INFO [RS:2;fc6cd38557f3:42825 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T07:44:52,901 INFO [RS:2;fc6cd38557f3:42825 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;fc6cd38557f3:42825. 
2024-12-05T07:44:52,901 DEBUG [RS:2;fc6cd38557f3:42825 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T07:44:52,901 INFO [RS:0;fc6cd38557f3:42055 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T07:44:52,901 DEBUG [RS:2;fc6cd38557f3:42825 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T07:44:52,901 INFO [RS:0;fc6cd38557f3:42055 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T07:44:52,901 INFO [RS:0;fc6cd38557f3:42055 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T07:44:52,901 INFO [RS:2;fc6cd38557f3:42825 {}] regionserver.HRegionServer(976): stopping server fc6cd38557f3,42825,1733384690626; all regions closed. 2024-12-05T07:44:52,901 INFO [RS:0;fc6cd38557f3:42055 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-05T07:44:52,902 INFO [RS:1;fc6cd38557f3:37385 {}] regionserver.HRegionServer(959): stopping server fc6cd38557f3,37385,1733384690585 2024-12-05T07:44:52,902 INFO [RS:1;fc6cd38557f3:37385 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T07:44:52,902 INFO [RS:1;fc6cd38557f3:37385 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;fc6cd38557f3:37385. 
2024-12-05T07:44:52,902 DEBUG [RS:1;fc6cd38557f3:37385 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T07:44:52,902 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42055-0x101a5bced8f0001, quorum=127.0.0.1:56053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T07:44:52,902 DEBUG [RS:1;fc6cd38557f3:37385 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T07:44:52,902 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T07:44:52,902 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37385-0x101a5bced8f0002, quorum=127.0.0.1:56053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T07:44:52,902 INFO [RS:1;fc6cd38557f3:37385 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-05T07:44:52,902 DEBUG [RS:1;fc6cd38557f3:37385 {}] regionserver.HRegionServer(1325): Online Regions={bddefc8e3a76af8a051481a8f05635e9=TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9.} 2024-12-05T07:44:52,902 DEBUG [RS:1;fc6cd38557f3:37385 {}] regionserver.HRegionServer(1351): Waiting on bddefc8e3a76af8a051481a8f05635e9 2024-12-05T07:44:52,903 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T07:44:52,903 INFO [regionserver/fc6cd38557f3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T07:44:52,903 DEBUG [RS_CLOSE_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing bddefc8e3a76af8a051481a8f05635e9, disabling compactions & flushes 2024-12-05T07:44:52,903 INFO [RS_CLOSE_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9. 
2024-12-05T07:44:52,904 DEBUG [RS_CLOSE_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9. 2024-12-05T07:44:52,904 DEBUG [RS_CLOSE_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9. after waiting 0 ms 2024-12-05T07:44:52,904 DEBUG [RS_CLOSE_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9. 2024-12-05T07:44:52,905 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T07:44:52,905 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T07:44:52,905 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T07:44:52,905 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T07:44:52,905 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T07:44:52,911 INFO [RS:0;fc6cd38557f3:42055 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-05T07:44:52,911 DEBUG [RS:0;fc6cd38557f3:42055 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-05T07:44:52,911 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T07:44:52,911 DEBUG [RS:0;fc6cd38557f3:42055 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-05T07:44:52,911 INFO [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T07:44:52,911 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T07:44:52,911 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T07:44:52,911 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T07:44:52,912 INFO [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-05T07:44:52,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741833_1009 (size=93) 2024-12-05T07:44:52,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741833_1009 (size=93) 2024-12-05T07:44:52,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741833_1009 (size=93) 2024-12-05T07:44:52,921 DEBUG [RS:2;fc6cd38557f3:42825 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/oldWALs 2024-12-05T07:44:52,921 INFO [RS:2;fc6cd38557f3:42825 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog fc6cd38557f3%2C42825%2C1733384690626:(num 1733384691326) 2024-12-05T07:44:52,921 DEBUG 
[RS:2;fc6cd38557f3:42825 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-05T07:44:52,921 INFO [RS:2;fc6cd38557f3:42825 {}] regionserver.LeaseManager(133): Closed leases
2024-12-05T07:44:52,922 INFO [RS:2;fc6cd38557f3:42825 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-05T07:44:52,922 INFO [RS:2;fc6cd38557f3:42825 {}] hbase.ChoreService(370): Chore service for: regionserver/fc6cd38557f3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-05T07:44:52,922 INFO [RS:2;fc6cd38557f3:42825 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-05T07:44:52,922 INFO [RS:2;fc6cd38557f3:42825 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-05T07:44:52,922 INFO [RS:2;fc6cd38557f3:42825 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-05T07:44:52,922 INFO [RS:2;fc6cd38557f3:42825 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-05T07:44:52,922 INFO [RS:2;fc6cd38557f3:42825 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42825
2024-12-05T07:44:52,923 INFO [regionserver/fc6cd38557f3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-05T07:44:52,927 DEBUG [RS_CLOSE_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/default/TestHBaseWalOnEC/bddefc8e3a76af8a051481a8f05635e9/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1
2024-12-05T07:44:52,928 INFO [RS_CLOSE_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9.
2024-12-05T07:44:52,929 DEBUG [RS_CLOSE_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for bddefc8e3a76af8a051481a8f05635e9:
    Waiting for close lock at 1733384692903
    Running coprocessor pre-close hooks at 1733384692903
    Disabling compacts and flushes for region at 1733384692903
    Disabling writes for close at 1733384692904 (+1 ms)
    Writing region close event to WAL at 1733384692915 (+11 ms)
    Running coprocessor post-close hooks at 1733384692928 (+13 ms)
    Closed at 1733384692928
2024-12-05T07:44:52,929 DEBUG [RS_CLOSE_REGION-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9.
2024-12-05T07:44:52,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fc6cd38557f3,42825,1733384690626 2024-12-05T07:44:52,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T07:44:52,931 INFO [RS:2;fc6cd38557f3:42825 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T07:44:52,932 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fc6cd38557f3,42825,1733384690626] 2024-12-05T07:44:52,947 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fc6cd38557f3,42825,1733384690626 already deleted, retry=false 2024-12-05T07:44:52,947 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fc6cd38557f3,42825,1733384690626 expired; onlineServers=2 2024-12-05T07:44:52,960 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740/.tmp/info/a0efe15983c14e7aaf9dc6d52ad095af is 153, key is TestHBaseWalOnEC,,1733384691892.bddefc8e3a76af8a051481a8f05635e9./info:regioninfo/1733384692339/Put/seqid=0 2024-12-05T07:44:52,962 WARN [IPC Server handler 4 on default port 36391 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-05T07:44:52,962 WARN [IPC Server handler 4 on default port 36391 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-05T07:44:52,962 WARN [IPC Server handler 4 on default port 36391 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-05T07:44:52,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741840_1016 (size=6637) 2024-12-05T07:44:52,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741840_1016 (size=6637) 2024-12-05T07:44:52,990 INFO [regionserver/fc6cd38557f3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T07:44:52,991 INFO 
[regionserver/fc6cd38557f3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T07:44:53,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T07:44:53,039 INFO [RS:2;fc6cd38557f3:42825 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T07:44:53,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42825-0x101a5bced8f0003, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T07:44:53,039 INFO [RS:2;fc6cd38557f3:42825 {}] regionserver.HRegionServer(1031): Exiting; stopping=fc6cd38557f3,42825,1733384690626; zookeeper connection closed. 2024-12-05T07:44:53,043 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2dab0232 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2dab0232 2024-12-05T07:44:53,103 INFO [RS:1;fc6cd38557f3:37385 {}] regionserver.HRegionServer(976): stopping server fc6cd38557f3,37385,1733384690585; all regions closed. 2024-12-05T07:44:53,103 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T07:44:53,103 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T07:44:53,104 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T07:44:53,104 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T07:44:53,104 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T07:44:53,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741834_1010 (size=1298) 2024-12-05T07:44:53,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741834_1010 (size=1298) 2024-12-05T07:44:53,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741834_1010 (size=1298) 2024-12-05T07:44:53,111 DEBUG [RS:0;fc6cd38557f3:42055 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-05T07:44:53,115 DEBUG [RS:1;fc6cd38557f3:37385 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/oldWALs 2024-12-05T07:44:53,115 INFO [RS:1;fc6cd38557f3:37385 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog fc6cd38557f3%2C37385%2C1733384690585:(num 1733384691333) 2024-12-05T07:44:53,115 DEBUG [RS:1;fc6cd38557f3:37385 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T07:44:53,115 INFO [RS:1;fc6cd38557f3:37385 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T07:44:53,116 INFO [RS:1;fc6cd38557f3:37385 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T07:44:53,116 INFO [RS:1;fc6cd38557f3:37385 {}] hbase.ChoreService(370): Chore service for: regionserver/fc6cd38557f3:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T07:44:53,116 INFO [RS:1;fc6cd38557f3:37385 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-05T07:44:53,116 INFO [RS:1;fc6cd38557f3:37385 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T07:44:53,116 INFO [RS:1;fc6cd38557f3:37385 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T07:44:53,116 INFO [RS:1;fc6cd38557f3:37385 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T07:44:53,116 INFO [RS:1;fc6cd38557f3:37385 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37385 2024-12-05T07:44:53,117 INFO [regionserver/fc6cd38557f3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T07:44:53,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37385-0x101a5bced8f0002, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fc6cd38557f3,37385,1733384690585 2024-12-05T07:44:53,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T07:44:53,139 INFO [RS:1;fc6cd38557f3:37385 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T07:44:53,147 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fc6cd38557f3,37385,1733384690585] 2024-12-05T07:44:53,155 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fc6cd38557f3,37385,1733384690585 already deleted, retry=false 2024-12-05T07:44:53,156 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fc6cd38557f3,37385,1733384690585 expired; onlineServers=1 2024-12-05T07:44:53,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37385-0x101a5bced8f0002, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T07:44:53,248 INFO [RS:1;fc6cd38557f3:37385 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T07:44:53,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37385-0x101a5bced8f0002, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T07:44:53,248 INFO [RS:1;fc6cd38557f3:37385 {}] regionserver.HRegionServer(1031): Exiting; stopping=fc6cd38557f3,37385,1733384690585; zookeeper connection closed. 
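The NodeDeleted and NodeChildrenChanged events above, followed by "RegionServer ephemeral node deleted, processing expiration", are the ephemeral-znode pattern: each region server registers an ephemeral node under /hbase/rs, and the master's RegionServerTracker treats its disappearance as that server going away. A minimal, self-contained ZooKeeper sketch of the same pattern follows; it is not HBase code, and the connect string and znode path are placeholders.

```java
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralNodeSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder quorum address; a real test would point at its MiniZooKeeperCluster.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });

        // Ephemeral: owned by this session and removed by the server when the session ends,
        // which is what the /hbase/rs/<server> registration in the log relies on.
        zk.create("/example-rs-heartbeat", new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        // Closing the session deletes the znode; watchers on it see NodeDeleted and watchers
        // on the parent see NodeChildrenChanged, matching the entries above.
        zk.close();
    }
}
```

Because the node is tied to the session, a clean shutdown and a crashed process look the same to the tracker: in both cases the znode vanishes and the master runs its expiration handling.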
2024-12-05T07:44:53,248 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1668905 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1668905 2024-12-05T07:44:53,290 INFO [regionserver/fc6cd38557f3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-05T07:44:53,290 INFO [regionserver/fc6cd38557f3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-05T07:44:53,311 DEBUG [RS:0;fc6cd38557f3:42055 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-05T07:44:53,369 INFO [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740/.tmp/info/a0efe15983c14e7aaf9dc6d52ad095af 2024-12-05T07:44:53,393 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740/.tmp/ns/1d45a6bc90bd4a87a0a0503c7f23de3d is 43, key is default/ns:d/1733384691809/Put/seqid=0 2024-12-05T07:44:53,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741841_1017 (size=5153) 2024-12-05T07:44:53,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741841_1017 (size=5153) 2024-12-05T07:44:53,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741841_1017 (size=5153) 2024-12-05T07:44:53,400 INFO [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740/.tmp/ns/1d45a6bc90bd4a87a0a0503c7f23de3d 2024-12-05T07:44:53,452 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740/.tmp/table/76ba246c55be4946a669e5b79639e737 is 52, key is TestHBaseWalOnEC/table:state/1733384692361/Put/seqid=0 2024-12-05T07:44:53,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741842_1018 (size=5249) 2024-12-05T07:44:53,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741842_1018 (size=5249) 2024-12-05T07:44:53,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741842_1018 (size=5249) 2024-12-05T07:44:53,460 INFO [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740/.tmp/table/76ba246c55be4946a669e5b79639e737 2024-12-05T07:44:53,468 DEBUG 
[RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740/.tmp/info/a0efe15983c14e7aaf9dc6d52ad095af as hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740/info/a0efe15983c14e7aaf9dc6d52ad095af
2024-12-05T07:44:53,475 INFO [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740/info/a0efe15983c14e7aaf9dc6d52ad095af, entries=10, sequenceid=11, filesize=6.5 K
2024-12-05T07:44:53,476 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740/.tmp/ns/1d45a6bc90bd4a87a0a0503c7f23de3d as hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740/ns/1d45a6bc90bd4a87a0a0503c7f23de3d
2024-12-05T07:44:53,485 INFO [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740/ns/1d45a6bc90bd4a87a0a0503c7f23de3d, entries=2, sequenceid=11, filesize=5.0 K
2024-12-05T07:44:53,486 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740/.tmp/table/76ba246c55be4946a669e5b79639e737 as hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740/table/76ba246c55be4946a669e5b79639e737
2024-12-05T07:44:53,494 INFO [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740/table/76ba246c55be4946a669e5b79639e737, entries=2, sequenceid=11, filesize=5.1 K
2024-12-05T07:44:53,496 INFO [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 584ms, sequenceid=11, compaction requested=false
2024-12-05T07:44:53,503 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-12-05T07:44:53,504 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-05T07:44:53,504 INFO [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-05T07:44:53,504 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740:
    Waiting for close lock at 1733384692911
    Running coprocessor pre-close hooks at 1733384692911
    Disabling compacts and flushes for region at 1733384692911
    Disabling writes for close at 1733384692911
    Obtaining lock to block concurrent updates at 1733384692912 (+1 ms)
    Preparing flush snapshotting stores in 1588230740 at 1733384692912
    Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733384692913 (+1 ms)
    Flushing stores of hbase:meta,,1.1588230740 at 1733384692916 (+3 ms)
    Flushing 1588230740/info: creating writer at 1733384692916
    Flushing 1588230740/info: appending metadata at 1733384692960 (+44 ms)
    Flushing 1588230740/info: closing flushed file at 1733384692960
    Flushing 1588230740/ns: creating writer at 1733384693378 (+418 ms)
    Flushing 1588230740/ns: appending metadata at 1733384693392 (+14 ms)
    Flushing 1588230740/ns: closing flushed file at 1733384693392
    Flushing 1588230740/table: creating writer at 1733384693408 (+16 ms)
    Flushing 1588230740/table: appending metadata at 1733384693451 (+43 ms)
    Flushing 1588230740/table: closing flushed file at 1733384693451
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a53f5aa: reopening flushed file at 1733384693466 (+15 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7235e024: reopening flushed file at 1733384693475 (+9 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@426b5951: reopening flushed file at 1733384693485 (+10 ms)
    Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 584ms, sequenceid=11, compaction requested=false at 1733384693496 (+11 ms)
    Writing region close event to WAL at 1733384693498 (+2 ms)
    Running coprocessor post-close hooks at 1733384693504 (+6 ms)
    Closed at 1733384693504
2024-12-05T07:44:53,504 DEBUG [RS_CLOSE_META-regionserver/fc6cd38557f3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-05T07:44:53,512 INFO [RS:0;fc6cd38557f3:42055 {}] regionserver.HRegionServer(976): stopping server fc6cd38557f3,42055,1733384690542; all regions closed.
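The HRegionFileSystem(442) "Committing ... as ..." entries above show the two-step flush pattern: the flusher writes a complete HFile under the region's .tmp directory, then moves it into the column family directory, so readers only ever see whole files. A rough sketch of that move using the plain Hadoop FileSystem API follows; the class, helper name, and paths are invented for illustration, and the real logic is the HRegionFileSystem code emitting the lines above.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitStoreFileSketch {
    // Move a fully written flush output from the region's .tmp area into the store directory.
    static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
        Path dst = new Path(storeDir, tmpFile.getName());
        // A same-filesystem rename is a metadata operation, so the file appears atomically.
        if (!fs.rename(tmpFile, dst)) {
            throw new IOException("Failed to commit " + tmpFile + " to " + dst);
        }
        return dst;
    }

    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Invented paths, shaped like the hbase:meta layout in the log above.
        Path tmp = new Path("/data/hbase/meta/1588230740/.tmp/info/example-flush-output");
        Path store = new Path("/data/hbase/meta/1588230740/info");
        System.out.println("committed to " + commit(fs, tmp, store));
    }
}
```

The matching HStore "Added ..., entries=..., sequenceid=..." lines then record the committed file being opened as a live store file.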
2024-12-05T07:44:53,512 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T07:44:53,512 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T07:44:53,513 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T07:44:53,513 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T07:44:53,513 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T07:44:53,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741836_1012 (size=2751) 2024-12-05T07:44:53,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741836_1012 (size=2751) 2024-12-05T07:44:53,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741836_1012 (size=2751) 2024-12-05T07:44:53,518 DEBUG [RS:0;fc6cd38557f3:42055 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/oldWALs 2024-12-05T07:44:53,518 INFO [RS:0;fc6cd38557f3:42055 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog fc6cd38557f3%2C42055%2C1733384690542.meta:.meta(num 1733384691731) 2024-12-05T07:44:53,519 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T07:44:53,519 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T07:44:53,519 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T07:44:53,519 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T07:44:53,519 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-05T07:44:53,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741835_1011 (size=93) 2024-12-05T07:44:53,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741835_1011 (size=93) 2024-12-05T07:44:53,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741835_1011 (size=93) 2024-12-05T07:44:53,526 DEBUG [RS:0;fc6cd38557f3:42055 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/oldWALs 2024-12-05T07:44:53,526 INFO [RS:0;fc6cd38557f3:42055 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog fc6cd38557f3%2C42055%2C1733384690542:(num 1733384691343) 2024-12-05T07:44:53,526 DEBUG [RS:0;fc6cd38557f3:42055 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T07:44:53,526 INFO [RS:0;fc6cd38557f3:42055 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T07:44:53,526 INFO [RS:0;fc6cd38557f3:42055 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T07:44:53,526 INFO [RS:0;fc6cd38557f3:42055 {}] hbase.ChoreService(370): Chore service for: regionserver/fc6cd38557f3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T07:44:53,526 INFO [RS:0;fc6cd38557f3:42055 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T07:44:53,526 INFO [regionserver/fc6cd38557f3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T07:44:53,527 INFO [RS:0;fc6cd38557f3:42055 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42055 2024-12-05T07:44:53,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T07:44:53,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42055-0x101a5bced8f0001, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fc6cd38557f3,42055,1733384690542 2024-12-05T07:44:53,572 INFO [RS:0;fc6cd38557f3:42055 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T07:44:53,572 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$371/0x00007f63208f8520@6d80d6df rejected from java.util.concurrent.ThreadPoolExecutor@2882550b[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-05T07:44:53,581 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fc6cd38557f3,42055,1733384690542] 2024-12-05T07:44:53,589 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fc6cd38557f3,42055,1733384690542 already deleted, retry=false 2024-12-05T07:44:53,589 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fc6cd38557f3,42055,1733384690542 expired; onlineServers=0 2024-12-05T07:44:53,589 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'fc6cd38557f3,44233,1733384690399' ***** 2024-12-05T07:44:53,589 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-05T07:44:53,589 INFO [M:0;fc6cd38557f3:44233 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T07:44:53,590 INFO [M:0;fc6cd38557f3:44233 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T07:44:53,590 DEBUG [M:0;fc6cd38557f3:44233 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-05T07:44:53,590 DEBUG [M:0;fc6cd38557f3:44233 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-05T07:44:53,590 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
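The ERROR above is benign shutdown noise: the ZooKeeper client's event thread delivers one last watcher callback after the region server has already terminated the executor that ZKWatcher.process() hands callbacks to, and the JDK's default AbortPolicy rejects the task. A minimal JDK-only illustration of the same race, with no HBase classes involved:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;

public class RejectedAfterShutdown {
    public static void main(String[] args) {
        ExecutorService eventExecutor = Executors.newSingleThreadExecutor();
        eventExecutor.shutdown();   // no queued work, so the pool terminates immediately

        try {
            // A late callback handed to a shut-down executor is refused by the default
            // AbortPolicy, the same RejectedExecutionException ClientCnxn$EventThread logs above.
            eventExecutor.execute(() -> System.out.println("never runs"));
        } catch (RejectedExecutionException expected) {
            System.out.println("late event rejected: " + expected);
        }
    }
}
```

Because the pool had already drained its queue (the log shows completed tasks = 14), only the final watcher notification is dropped, which is why this surfaces as teardown noise rather than a test failure.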
2024-12-05T07:44:53,590 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster-HFileCleaner.large.0-1733384691048 {}] cleaner.HFileCleaner(306): Exit Thread[master/fc6cd38557f3:0:becomeActiveMaster-HFileCleaner.large.0-1733384691048,5,FailOnTimeoutGroup] 2024-12-05T07:44:53,590 INFO [M:0;fc6cd38557f3:44233 {}] hbase.ChoreService(370): Chore service for: master/fc6cd38557f3:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-05T07:44:53,590 INFO [M:0;fc6cd38557f3:44233 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T07:44:53,590 DEBUG [M:0;fc6cd38557f3:44233 {}] master.HMaster(1795): Stopping service threads 2024-12-05T07:44:53,590 INFO [M:0;fc6cd38557f3:44233 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-05T07:44:53,590 DEBUG [master/fc6cd38557f3:0:becomeActiveMaster-HFileCleaner.small.0-1733384691048 {}] cleaner.HFileCleaner(306): Exit Thread[master/fc6cd38557f3:0:becomeActiveMaster-HFileCleaner.small.0-1733384691048,5,FailOnTimeoutGroup] 2024-12-05T07:44:53,591 INFO [M:0;fc6cd38557f3:44233 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T07:44:53,591 INFO [M:0;fc6cd38557f3:44233 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-05T07:44:53,591 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-05T07:44:53,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T07:44:53,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T07:44:53,605 DEBUG [M:0;fc6cd38557f3:44233 {}] zookeeper.ZKUtil(347): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-05T07:44:53,605 WARN [M:0;fc6cd38557f3:44233 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-05T07:44:53,606 INFO [M:0;fc6cd38557f3:44233 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/.lastflushedseqids 2024-12-05T07:44:53,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741843_1019 (size=127) 2024-12-05T07:44:53,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741843_1019 (size=127) 2024-12-05T07:44:53,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741843_1019 (size=127) 2024-12-05T07:44:53,620 INFO [M:0;fc6cd38557f3:44233 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-05T07:44:53,621 INFO [M:0;fc6cd38557f3:44233 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-05T07:44:53,621 DEBUG 
[M:0;fc6cd38557f3:44233 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T07:44:53,621 INFO [M:0;fc6cd38557f3:44233 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T07:44:53,621 DEBUG [M:0;fc6cd38557f3:44233 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T07:44:53,621 DEBUG [M:0;fc6cd38557f3:44233 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T07:44:53,621 DEBUG [M:0;fc6cd38557f3:44233 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T07:44:53,621 INFO [M:0;fc6cd38557f3:44233 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-12-05T07:44:53,645 DEBUG [M:0;fc6cd38557f3:44233 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e3d767805573468d8772b5405a20ad2d is 82, key is hbase:meta,,1/info:regioninfo/1733384691768/Put/seqid=0 2024-12-05T07:44:53,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741844_1020 (size=5672) 2024-12-05T07:44:53,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741844_1020 (size=5672) 2024-12-05T07:44:53,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741844_1020 (size=5672) 2024-12-05T07:44:53,657 INFO [M:0;fc6cd38557f3:44233 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e3d767805573468d8772b5405a20ad2d 2024-12-05T07:44:53,677 DEBUG [M:0;fc6cd38557f3:44233 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f2816b5153e44c2da59e6a255fe114bf is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733384692372/Put/seqid=0 2024-12-05T07:44:53,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42055-0x101a5bced8f0001, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T07:44:53,681 INFO [RS:0;fc6cd38557f3:42055 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T07:44:53,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42055-0x101a5bced8f0001, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T07:44:53,681 INFO [RS:0;fc6cd38557f3:42055 {}] regionserver.HRegionServer(1031): Exiting; stopping=fc6cd38557f3,42055,1733384690542; zookeeper connection closed. 
2024-12-05T07:44:53,682 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5dbcfe22 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5dbcfe22 2024-12-05T07:44:53,682 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-05T07:44:53,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741845_1021 (size=6440) 2024-12-05T07:44:53,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741845_1021 (size=6440) 2024-12-05T07:44:53,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741845_1021 (size=6440) 2024-12-05T07:44:53,691 INFO [M:0;fc6cd38557f3:44233 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.15 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f2816b5153e44c2da59e6a255fe114bf 2024-12-05T07:44:53,715 DEBUG [M:0;fc6cd38557f3:44233 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/de8374a62bd54d52897bd8d9bb20338e is 69, key is fc6cd38557f3,37385,1733384690585/rs:state/1733384691123/Put/seqid=0 2024-12-05T07:44:53,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741846_1022 (size=5294) 2024-12-05T07:44:53,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741846_1022 (size=5294) 2024-12-05T07:44:53,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741846_1022 (size=5294) 2024-12-05T07:44:53,724 INFO [M:0;fc6cd38557f3:44233 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/de8374a62bd54d52897bd8d9bb20338e 2024-12-05T07:44:53,731 DEBUG [M:0;fc6cd38557f3:44233 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e3d767805573468d8772b5405a20ad2d as hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e3d767805573468d8772b5405a20ad2d 2024-12-05T07:44:53,740 INFO [M:0;fc6cd38557f3:44233 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e3d767805573468d8772b5405a20ad2d, entries=8, sequenceid=72, filesize=5.5 K 2024-12-05T07:44:53,742 DEBUG [M:0;fc6cd38557f3:44233 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f2816b5153e44c2da59e6a255fe114bf as hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f2816b5153e44c2da59e6a255fe114bf
2024-12-05T07:44:53,750 INFO [M:0;fc6cd38557f3:44233 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f2816b5153e44c2da59e6a255fe114bf, entries=8, sequenceid=72, filesize=6.3 K
2024-12-05T07:44:53,751 DEBUG [M:0;fc6cd38557f3:44233 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/de8374a62bd54d52897bd8d9bb20338e as hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/de8374a62bd54d52897bd8d9bb20338e
2024-12-05T07:44:53,762 INFO [M:0;fc6cd38557f3:44233 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36391/user/jenkins/test-data/e8dd34b7-2137-2c33-c0ee-cd2bc2a688b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/de8374a62bd54d52897bd8d9bb20338e, entries=3, sequenceid=72, filesize=5.2 K
2024-12-05T07:44:53,763 INFO [M:0;fc6cd38557f3:44233 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 142ms, sequenceid=72, compaction requested=false
2024-12-05T07:44:53,771 INFO [M:0;fc6cd38557f3:44233 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-05T07:44:53,771 DEBUG [M:0;fc6cd38557f3:44233 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
    Waiting for close lock at 1733384693621
    Disabling compacts and flushes for region at 1733384693621
    Disabling writes for close at 1733384693621
    Obtaining lock to block concurrent updates at 1733384693621
    Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733384693621
    Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27480, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1733384693622 (+1 ms)
    Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733384693623 (+1 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733384693623
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733384693645 (+22 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733384693645
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733384693663 (+18 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733384693677 (+14 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733384693677
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733384693698 (+21 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733384693714 (+16 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733384693714
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35272e39: reopening flushed file at 1733384693730 (+16 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35a9c9a6: reopening flushed file at 1733384693740 (+10 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@741cd5fe: reopening flushed file at 1733384693750 (+10 ms)
    Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 142ms, sequenceid=72, compaction requested=false at 1733384693764 (+14 ms)
    Writing region close event to WAL at 1733384693771 (+7 ms)
    Closed at 1733384693771
2024-12-05T07:44:53,771 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T07:44:53,772 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T07:44:53,772 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T07:44:53,772 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T07:44:53,772 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-05T07:44:53,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40881 is added to blk_1073741830_1006 (size=32683)
2024-12-05T07:44:53,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45877 is added to blk_1073741830_1006 (size=32683)
2024-12-05T07:44:53,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38583 is added to blk_1073741830_1006 (size=32683)
2024-12-05T07:44:53,779 INFO [M:0;fc6cd38557f3:44233 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-05T07:44:53,779 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-05T07:44:53,780 INFO [M:0;fc6cd38557f3:44233 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44233 2024-12-05T07:44:53,780 INFO [M:0;fc6cd38557f3:44233 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T07:44:53,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T07:44:53,897 INFO [M:0;fc6cd38557f3:44233 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T07:44:53,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44233-0x101a5bced8f0000, quorum=127.0.0.1:56053, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T07:44:53,946 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f872bbe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T07:44:53,946 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@734ce002{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T07:44:53,946 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T07:44:53,946 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19b3fbfc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T07:44:53,947 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4dcac8d6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/hadoop.log.dir/,STOPPED} 2024-12-05T07:44:53,948 WARN [BP-1891229739-172.17.0.2-1733384688285 heartbeating to localhost/127.0.0.1:36391 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T07:44:53,948 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T07:44:53,948 WARN [BP-1891229739-172.17.0.2-1733384688285 heartbeating to localhost/127.0.0.1:36391 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1891229739-172.17.0.2-1733384688285 (Datanode Uuid 9c569f87-685a-4eae-afaf-5e046b9abb72) service to localhost/127.0.0.1:36391 2024-12-05T07:44:53,948 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T07:44:53,948 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/cluster_31b7b7bd-a015-2a30-f4f5-858b91d603d0/data/data5/current/BP-1891229739-172.17.0.2-1733384688285 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T07:44:53,949 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/cluster_31b7b7bd-a015-2a30-f4f5-858b91d603d0/data/data6/current/BP-1891229739-172.17.0.2-1733384688285 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T07:44:53,949 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T07:44:53,952 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2c130e95{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T07:44:53,952 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21c2764e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T07:44:53,952 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T07:44:53,952 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@63ea337e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T07:44:53,953 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@29927a24{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/hadoop.log.dir/,STOPPED} 2024-12-05T07:44:53,954 WARN [BP-1891229739-172.17.0.2-1733384688285 heartbeating to localhost/127.0.0.1:36391 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T07:44:53,954 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T07:44:53,954 WARN [BP-1891229739-172.17.0.2-1733384688285 heartbeating to localhost/127.0.0.1:36391 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1891229739-172.17.0.2-1733384688285 (Datanode Uuid 21449816-2259-4802-bd99-f4a5f5828df6) service to localhost/127.0.0.1:36391
2024-12-05T07:44:53,954 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-05T07:44:53,954 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/cluster_31b7b7bd-a015-2a30-f4f5-858b91d603d0/data/data3/current/BP-1891229739-172.17.0.2-1733384688285 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T07:44:53,954 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/cluster_31b7b7bd-a015-2a30-f4f5-858b91d603d0/data/data4/current/BP-1891229739-172.17.0.2-1733384688285 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T07:44:53,955 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-05T07:44:53,959 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@19f40ccf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T07:44:53,959 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78de12c2{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T07:44:53,959 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T07:44:53,959 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@427407e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T07:44:53,959 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f7cb10{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/hadoop.log.dir/,STOPPED}
2024-12-05T07:44:53,961 WARN [BP-1891229739-172.17.0.2-1733384688285 heartbeating to localhost/127.0.0.1:36391 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-05T07:44:53,961 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-05T07:44:53,961 WARN [BP-1891229739-172.17.0.2-1733384688285 heartbeating to localhost/127.0.0.1:36391 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1891229739-172.17.0.2-1733384688285 (Datanode Uuid 2b70ffb7-7322-4ec7-8eb7-6126a2a10100) service to localhost/127.0.0.1:36391
2024-12-05T07:44:53,961 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-05T07:44:53,961 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/cluster_31b7b7bd-a015-2a30-f4f5-858b91d603d0/data/data1/current/BP-1891229739-172.17.0.2-1733384688285 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T07:44:53,962 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/cluster_31b7b7bd-a015-2a30-f4f5-858b91d603d0/data/data2/current/BP-1891229739-172.17.0.2-1733384688285 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T07:44:53,962 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-05T07:44:53,968 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2385e487{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-05T07:44:53,969 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@722f6ac4{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T07:44:53,969 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T07:44:53,969 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fbe2fc0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T07:44:53,969 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@760f4a1c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/879b195a-6d70-226d-0978-9f63c906d950/hadoop.log.dir/,STOPPED}
2024-12-05T07:44:53,977 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-05T07:44:54,002 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-05T07:44:54,010 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=150 (was 88) - Thread LEAK? -, OpenFileDescriptor=516 (was 443) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=335 (was 304) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6735 (was 7009)