2024-12-10 04:57:02,250 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-10 04:57:02,261 main DEBUG Took 0.009551 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-10 04:57:02,261 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-10 04:57:02,262 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-10 04:57:02,263 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-10 04:57:02,264 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 04:57:02,273 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-10 04:57:02,289 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 04:57:02,290 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 04:57:02,291 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 04:57:02,291 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 04:57:02,292 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 04:57:02,292 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 04:57:02,293 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 04:57:02,293 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 04:57:02,293 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 04:57:02,294 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 04:57:02,294 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 04:57:02,295 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 04:57:02,295 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 04:57:02,295 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-10 04:57:02,296 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 04:57:02,296 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 04:57:02,296 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 04:57:02,297 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 04:57:02,297 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 04:57:02,297 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 04:57:02,298 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 04:57:02,298 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 04:57:02,298 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 04:57:02,299 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 04:57:02,299 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 04:57:02,299 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-10 04:57:02,301 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 04:57:02,302 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-10 04:57:02,303 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-10 04:57:02,304 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-10 04:57:02,305 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-10 04:57:02,305 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-10 04:57:02,313 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-10 04:57:02,315 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-10 04:57:02,317 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-10 04:57:02,318 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-10 04:57:02,318 main DEBUG createAppenders(={Console}) 2024-12-10 04:57:02,319 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-10 04:57:02,319 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-10 04:57:02,320 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-10 04:57:02,320 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-10 04:57:02,320 main DEBUG OutputStream closed 2024-12-10 04:57:02,320 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-10 04:57:02,321 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-10 04:57:02,321 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-10 04:57:02,383 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-10 04:57:02,385 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-10 04:57:02,386 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-10 04:57:02,387 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-10 04:57:02,387 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-10 04:57:02,387 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-10 04:57:02,388 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-10 04:57:02,388 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-10 04:57:02,388 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-10 04:57:02,388 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-10 04:57:02,389 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-10 04:57:02,389 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-10 04:57:02,389 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-10 04:57:02,389 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-10 04:57:02,390 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-10 04:57:02,390 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-10 04:57:02,390 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-10 04:57:02,391 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-10 04:57:02,393 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-10 04:57:02,393 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-10 04:57:02,394 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-10 04:57:02,394 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-10T04:57:02,408 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-10 04:57:02,410 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-10 04:57:02,411 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
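The configuration activity above is driven by the log4j2.properties packaged in the hbase-logging tests jar (the URI is shown in the "Reconfiguration complete" entry). Below is a minimal sketch of a properties file that would produce an equivalent setup; it uses the standard Console appender as a stand-in for HBase's custom HBaseTestAppender, keeps only a few of the per-logger levels seen in the builder entries above, and the exact keys in the real file may differ. The `status = debug` setting is what makes Log4j emit the "main DEBUG" status lines above.

```properties
# Sketch only: an approximation of the configuration the status messages above describe.
status = debug

appender.console.type = Console
appender.console.name = Console
appender.console.target = SYSTEM_ERR
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

# Root logger routes INFO and above to the Console appender (levelAndRefs="INFO,Console" above).
rootLogger = INFO,Console

# A few of the per-package levels shown in the LoggerConfig$Builder entries.
logger.zookeeper.name = org.apache.zookeeper
logger.zookeeper.level = ERROR
logger.hadoop.name = org.apache.hadoop
logger.hadoop.level = WARN
logger.hbase.name = org.apache.hadoop.hbase
logger.hbase.level = DEBUG
```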
2024-12-10T04:57:02,606 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0 2024-12-10T04:57:02,635 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/cluster_6c5e2b68-39d0-c07f-3b61-774f8639accf, deleteOnExit=true 2024-12-10T04:57:02,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/test.cache.data in system properties and HBase conf 2024-12-10T04:57:02,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/hadoop.tmp.dir in system properties and HBase conf 2024-12-10T04:57:02,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/hadoop.log.dir in system properties and HBase conf 2024-12-10T04:57:02,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-10T04:57:02,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-10T04:57:02,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-10T04:57:02,750 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-10T04:57:02,862 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-10T04:57:02,867 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-10T04:57:02,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-10T04:57:02,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-10T04:57:02,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-10T04:57:02,870 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-10T04:57:02,870 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-10T04:57:02,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-10T04:57:02,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-10T04:57:02,872 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-10T04:57:02,872 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/nfs.dump.dir in system properties and HBase conf 2024-12-10T04:57:02,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/java.io.tmpdir in system properties and HBase conf 2024-12-10T04:57:02,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-10T04:57:02,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-10T04:57:02,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-10T04:57:04,001 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-10T04:57:04,103 INFO [Time-limited test {}] log.Log(170): Logging initialized @2497ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-10T04:57:04,180 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T04:57:04,244 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T04:57:04,277 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T04:57:04,278 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T04:57:04,281 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-10T04:57:04,303 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T04:57:04,307 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/hadoop.log.dir/,AVAILABLE} 2024-12-10T04:57:04,308 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T04:57:04,485 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/java.io.tmpdir/jetty-localhost-36535-hadoop-hdfs-3_4_1-tests_jar-_-any-10244744631108916338/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-10T04:57:04,497 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:36535} 2024-12-10T04:57:04,498 INFO [Time-limited test {}] server.Server(415): Started @2893ms 2024-12-10T04:57:05,158 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T04:57:05,167 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T04:57:05,168 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T04:57:05,168 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T04:57:05,169 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-10T04:57:05,170 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/hadoop.log.dir/,AVAILABLE} 2024-12-10T04:57:05,171 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T04:57:05,286 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/java.io.tmpdir/jetty-localhost-41223-hadoop-hdfs-3_4_1-tests_jar-_-any-18249989669689735790/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T04:57:05,287 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:41223} 2024-12-10T04:57:05,287 INFO [Time-limited test {}] server.Server(415): Started @3683ms 2024-12-10T04:57:05,352 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-10T04:57:05,452 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T04:57:05,460 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T04:57:05,465 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T04:57:05,465 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T04:57:05,466 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-10T04:57:05,467 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/hadoop.log.dir/,AVAILABLE} 2024-12-10T04:57:05,468 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T04:57:05,579 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/java.io.tmpdir/jetty-localhost-45985-hadoop-hdfs-3_4_1-tests_jar-_-any-17583686598640568243/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T04:57:05,579 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:45985} 2024-12-10T04:57:05,579 INFO [Time-limited test {}] server.Server(415): Started @3975ms 2024-12-10T04:57:05,581 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-10T04:57:05,613 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T04:57:05,617 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T04:57:05,618 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T04:57:05,619 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T04:57:05,619 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-10T04:57:05,619 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/hadoop.log.dir/,AVAILABLE} 2024-12-10T04:57:05,620 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T04:57:05,717 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/java.io.tmpdir/jetty-localhost-40289-hadoop-hdfs-3_4_1-tests_jar-_-any-8445859548062921380/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T04:57:05,718 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:40289} 2024-12-10T04:57:05,718 INFO [Time-limited test {}] server.Server(415): Started @4114ms 2024-12-10T04:57:05,720 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
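At this point the harness has a namenode web context plus three datanode web contexts running, i.e. an in-process HDFS with three datanodes. The test reaches this through HBaseTestingUtil, but a standalone sketch of an equivalent three-datanode mini cluster using Hadoop's MiniDFSCluster test helper looks roughly like this (the class name and the smoke check are illustrative, not taken from the test):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Three datanodes, matching the three "datanode" Jetty contexts started above.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)
        .build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      // Simple smoke check: the root of the fresh mini cluster exists.
      System.out.println("root exists: " + fs.exists(new Path("/")));
    } finally {
      cluster.shutdown();
    }
  }
}
```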
2024-12-10T04:57:07,010 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/cluster_6c5e2b68-39d0-c07f-3b61-774f8639accf/data/data2/current/BP-1791055191-172.17.0.2-1733806623406/current, will proceed with Du for space computation calculation, 2024-12-10T04:57:07,010 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/cluster_6c5e2b68-39d0-c07f-3b61-774f8639accf/data/data3/current/BP-1791055191-172.17.0.2-1733806623406/current, will proceed with Du for space computation calculation, 2024-12-10T04:57:07,010 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/cluster_6c5e2b68-39d0-c07f-3b61-774f8639accf/data/data1/current/BP-1791055191-172.17.0.2-1733806623406/current, will proceed with Du for space computation calculation, 2024-12-10T04:57:07,010 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/cluster_6c5e2b68-39d0-c07f-3b61-774f8639accf/data/data4/current/BP-1791055191-172.17.0.2-1733806623406/current, will proceed with Du for space computation calculation, 2024-12-10T04:57:07,047 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-10T04:57:07,048 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-10T04:57:07,099 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x84aceaa3407a9fa2 with lease ID 0x7809515515ca6b50: Processing first storage report for DS-c7cfb6b1-21ac-4270-a3d9-06bcb373aca7 from datanode DatanodeRegistration(127.0.0.1:43693, datanodeUuid=b77a1154-46fd-41e2-9a19-30e23aec2da5, infoPort=39471, infoSecurePort=0, ipcPort=41139, storageInfo=lv=-57;cid=testClusterID;nsid=46193522;c=1733806623406) 2024-12-10T04:57:07,100 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x84aceaa3407a9fa2 with lease ID 0x7809515515ca6b50: from storage DS-c7cfb6b1-21ac-4270-a3d9-06bcb373aca7 node DatanodeRegistration(127.0.0.1:43693, datanodeUuid=b77a1154-46fd-41e2-9a19-30e23aec2da5, infoPort=39471, infoSecurePort=0, ipcPort=41139, storageInfo=lv=-57;cid=testClusterID;nsid=46193522;c=1733806623406), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-10T04:57:07,100 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbffbfa31574e28b with lease ID 0x7809515515ca6b51: Processing first storage report for DS-937ffc99-ba75-4f67-8823-bfaa08d3a371 from datanode DatanodeRegistration(127.0.0.1:44817, datanodeUuid=029eb7d9-cf8c-4f99-a348-85773f3a1a70, infoPort=39733, infoSecurePort=0, ipcPort=39403, storageInfo=lv=-57;cid=testClusterID;nsid=46193522;c=1733806623406) 2024-12-10T04:57:07,101 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbffbfa31574e28b with lease ID 0x7809515515ca6b51: from storage DS-937ffc99-ba75-4f67-8823-bfaa08d3a371 node DatanodeRegistration(127.0.0.1:44817, datanodeUuid=029eb7d9-cf8c-4f99-a348-85773f3a1a70, infoPort=39733, infoSecurePort=0, ipcPort=39403, storageInfo=lv=-57;cid=testClusterID;nsid=46193522;c=1733806623406), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T04:57:07,101 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x84aceaa3407a9fa2 with lease ID 0x7809515515ca6b50: Processing first storage report for DS-0f2ef228-792b-4fd1-a631-ed6225d9fca9 from datanode DatanodeRegistration(127.0.0.1:43693, datanodeUuid=b77a1154-46fd-41e2-9a19-30e23aec2da5, infoPort=39471, infoSecurePort=0, ipcPort=41139, storageInfo=lv=-57;cid=testClusterID;nsid=46193522;c=1733806623406) 2024-12-10T04:57:07,101 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x84aceaa3407a9fa2 with lease ID 0x7809515515ca6b50: from storage DS-0f2ef228-792b-4fd1-a631-ed6225d9fca9 node DatanodeRegistration(127.0.0.1:43693, datanodeUuid=b77a1154-46fd-41e2-9a19-30e23aec2da5, infoPort=39471, infoSecurePort=0, ipcPort=41139, storageInfo=lv=-57;cid=testClusterID;nsid=46193522;c=1733806623406), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T04:57:07,101 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbffbfa31574e28b with lease ID 0x7809515515ca6b51: Processing first storage report for DS-fbcf982f-d2d7-48cd-92e3-064fd618cc85 from datanode DatanodeRegistration(127.0.0.1:44817, datanodeUuid=029eb7d9-cf8c-4f99-a348-85773f3a1a70, infoPort=39733, infoSecurePort=0, ipcPort=39403, storageInfo=lv=-57;cid=testClusterID;nsid=46193522;c=1733806623406) 2024-12-10T04:57:07,101 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbffbfa31574e28b with 
lease ID 0x7809515515ca6b51: from storage DS-fbcf982f-d2d7-48cd-92e3-064fd618cc85 node DatanodeRegistration(127.0.0.1:44817, datanodeUuid=029eb7d9-cf8c-4f99-a348-85773f3a1a70, infoPort=39733, infoSecurePort=0, ipcPort=39403, storageInfo=lv=-57;cid=testClusterID;nsid=46193522;c=1733806623406), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T04:57:07,111 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/cluster_6c5e2b68-39d0-c07f-3b61-774f8639accf/data/data5/current/BP-1791055191-172.17.0.2-1733806623406/current, will proceed with Du for space computation calculation, 2024-12-10T04:57:07,111 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/cluster_6c5e2b68-39d0-c07f-3b61-774f8639accf/data/data6/current/BP-1791055191-172.17.0.2-1733806623406/current, will proceed with Du for space computation calculation, 2024-12-10T04:57:07,133 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-10T04:57:07,138 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x164d4455f8b48370 with lease ID 0x7809515515ca6b52: Processing first storage report for DS-0b8ed2a7-6605-4265-866d-1ebfe87d16ab from datanode DatanodeRegistration(127.0.0.1:45993, datanodeUuid=63c63997-802b-4298-97ae-6b2172b1f39f, infoPort=39765, infoSecurePort=0, ipcPort=37371, storageInfo=lv=-57;cid=testClusterID;nsid=46193522;c=1733806623406) 2024-12-10T04:57:07,138 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x164d4455f8b48370 with lease ID 0x7809515515ca6b52: from storage DS-0b8ed2a7-6605-4265-866d-1ebfe87d16ab node DatanodeRegistration(127.0.0.1:45993, datanodeUuid=63c63997-802b-4298-97ae-6b2172b1f39f, infoPort=39765, infoSecurePort=0, ipcPort=37371, storageInfo=lv=-57;cid=testClusterID;nsid=46193522;c=1733806623406), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T04:57:07,139 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x164d4455f8b48370 with lease ID 0x7809515515ca6b52: Processing first storage report for DS-2e733722-6c88-41df-9c9d-1b1d657b1fdb from datanode DatanodeRegistration(127.0.0.1:45993, datanodeUuid=63c63997-802b-4298-97ae-6b2172b1f39f, infoPort=39765, infoSecurePort=0, ipcPort=37371, storageInfo=lv=-57;cid=testClusterID;nsid=46193522;c=1733806623406) 2024-12-10T04:57:07,139 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x164d4455f8b48370 with lease ID 0x7809515515ca6b52: from storage DS-2e733722-6c88-41df-9c9d-1b1d657b1fdb node DatanodeRegistration(127.0.0.1:45993, datanodeUuid=63c63997-802b-4298-97ae-6b2172b1f39f, infoPort=39765, infoSecurePort=0, ipcPort=37371, storageInfo=lv=-57;cid=testClusterID;nsid=46193522;c=1733806623406), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T04:57:07,174 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0 
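With HDFS up and the initial block reports processed, the entries that follow show the harness starting the HBase side of the mini cluster: one master, three region servers and one ZooKeeper server, reusing the already-running DFS ("NOT STARTING DFS"). A minimal sketch of that startup is shown below; the builder method names are assumed from the StartMiniClusterOption string printed in the log rather than verified against the API.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniHBaseClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=3,
    // numDataNodes=3, numZkServers=1, ...} from the log; method names assumed.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);
    try {
      // Test logic would go here, e.g. creating a table and writing a few rows.
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```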
2024-12-10T04:57:07,251 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable 2024-12-10T04:57:07,309 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=157, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=255, ProcessCount=11, AvailableMemoryMB=5647 2024-12-10T04:57:07,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-10T04:57:07,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-10T04:57:07,415 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/cluster_6c5e2b68-39d0-c07f-3b61-774f8639accf/zookeeper_0, clientPort=49472, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/cluster_6c5e2b68-39d0-c07f-3b61-774f8639accf/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/cluster_6c5e2b68-39d0-c07f-3b61-774f8639accf/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-10T04:57:07,425 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49472 2024-12-10T04:57:07,433 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:07,435 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:07,515 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:07,516 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:07,568 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-841780559_22 at /127.0.0.1:35820 [Receiving block BP-1791055191-172.17.0.2-1733806623406:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35820 dst: /127.0.0.1:45993 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T04:57:07,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_-9223372036854775792_1002 (size=7) 2024-12-10T04:57:07,985 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T04:57:07,996 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34 with version=8 2024-12-10T04:57:07,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/hbase-staging 2024-12-10T04:57:08,082 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-10T04:57:08,321 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6578523f4421:0 server-side Connection retries=45 2024-12-10T04:57:08,329 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:08,330 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:08,334 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T04:57:08,334 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:08,334 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T04:57:08,447 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-10T04:57:08,514 INFO 
[Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-10T04:57:08,522 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-10T04:57:08,525 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T04:57:08,551 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 80865 (auto-detected) 2024-12-10T04:57:08,552 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-10T04:57:08,572 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32843 2024-12-10T04:57:08,592 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:32843 connecting to ZooKeeper ensemble=127.0.0.1:49472 2024-12-10T04:57:08,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:328430x0, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T04:57:08,720 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:32843-0x1000e15a10d0000 connected 2024-12-10T04:57:08,835 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:08,840 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:08,856 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T04:57:08,864 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34, hbase.cluster.distributed=false 2024-12-10T04:57:08,901 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T04:57:08,909 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32843 2024-12-10T04:57:08,910 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32843 2024-12-10T04:57:08,910 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32843 2024-12-10T04:57:08,911 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32843 2024-12-10T04:57:08,911 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32843 2024-12-10T04:57:09,035 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6578523f4421:0 server-side Connection retries=45 2024-12-10T04:57:09,036 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, 
maxQueueLength=30, handlerCount=3 2024-12-10T04:57:09,036 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:09,036 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T04:57:09,036 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:09,037 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T04:57:09,039 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T04:57:09,041 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T04:57:09,042 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38527 2024-12-10T04:57:09,043 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38527 connecting to ZooKeeper ensemble=127.0.0.1:49472 2024-12-10T04:57:09,044 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:09,047 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:09,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:385270x0, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T04:57:09,063 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:385270x0, quorum=127.0.0.1:49472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T04:57:09,063 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38527-0x1000e15a10d0001 connected 2024-12-10T04:57:09,067 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T04:57:09,079 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T04:57:09,082 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38527-0x1000e15a10d0001, quorum=127.0.0.1:49472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T04:57:09,089 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38527-0x1000e15a10d0001, quorum=127.0.0.1:49472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T04:57:09,090 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38527 2024-12-10T04:57:09,091 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38527 2024-12-10T04:57:09,091 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38527 2024-12-10T04:57:09,092 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38527 2024-12-10T04:57:09,092 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38527 2024-12-10T04:57:09,106 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6578523f4421:0 server-side Connection retries=45 2024-12-10T04:57:09,107 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:09,107 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:09,107 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T04:57:09,107 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:09,108 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T04:57:09,108 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T04:57:09,108 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T04:57:09,109 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45887 2024-12-10T04:57:09,111 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45887 connecting to ZooKeeper ensemble=127.0.0.1:49472 2024-12-10T04:57:09,112 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:09,115 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:09,184 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:458870x0, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T04:57:09,185 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45887-0x1000e15a10d0002 connected 2024-12-10T04:57:09,185 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45887-0x1000e15a10d0002, quorum=127.0.0.1:49472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T04:57:09,186 INFO 
[Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T04:57:09,188 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T04:57:09,189 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45887-0x1000e15a10d0002, quorum=127.0.0.1:49472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T04:57:09,191 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45887-0x1000e15a10d0002, quorum=127.0.0.1:49472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T04:57:09,191 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45887 2024-12-10T04:57:09,192 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45887 2024-12-10T04:57:09,192 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45887 2024-12-10T04:57:09,196 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45887 2024-12-10T04:57:09,197 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45887 2024-12-10T04:57:09,213 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6578523f4421:0 server-side Connection retries=45 2024-12-10T04:57:09,214 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:09,214 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:09,214 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T04:57:09,214 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:09,215 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T04:57:09,215 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T04:57:09,215 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T04:57:09,216 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37799 2024-12-10T04:57:09,218 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37799 connecting to ZooKeeper ensemble=127.0.0.1:49472 2024-12-10T04:57:09,220 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to 
namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:09,223 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:09,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:377990x0, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T04:57:09,254 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37799-0x1000e15a10d0003 connected 2024-12-10T04:57:09,254 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T04:57:09,255 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T04:57:09,261 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T04:57:09,263 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T04:57:09,265 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T04:57:09,272 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37799 2024-12-10T04:57:09,272 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37799 2024-12-10T04:57:09,273 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37799 2024-12-10T04:57:09,281 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37799 2024-12-10T04:57:09,282 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37799 2024-12-10T04:57:09,302 DEBUG [M:0;6578523f4421:32843 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6578523f4421:32843 2024-12-10T04:57:09,304 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6578523f4421,32843,1733806628164 2024-12-10T04:57:09,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T04:57:09,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38527-0x1000e15a10d0001, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T04:57:09,328 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45887-0x1000e15a10d0002, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T04:57:09,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T04:57:09,332 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6578523f4421,32843,1733806628164 2024-12-10T04:57:09,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T04:57:09,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38527-0x1000e15a10d0001, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T04:57:09,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:09,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:09,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38527-0x1000e15a10d0001, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:09,358 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45887-0x1000e15a10d0002, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T04:57:09,359 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45887-0x1000e15a10d0002, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:09,362 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-10T04:57:09,364 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6578523f4421,32843,1733806628164 from backup master directory 2024-12-10T04:57:09,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38527-0x1000e15a10d0001, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T04:57:09,379 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45887-0x1000e15a10d0002, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T04:57:09,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T04:57:09,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6578523f4421,32843,1733806628164 2024-12-10T04:57:09,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T04:57:09,382 WARN [master/6578523f4421:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T04:57:09,382 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6578523f4421,32843,1733806628164 2024-12-10T04:57:09,384 INFO [master/6578523f4421:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-10T04:57:09,386 INFO [master/6578523f4421:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-10T04:57:09,457 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/hbase.id] with ID: 8129eecb-9fee-458b-a93b-7fd55d04eeb5 2024-12-10T04:57:09,457 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/.tmp/hbase.id 2024-12-10T04:57:09,466 WARN [master/6578523f4421:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:09,467 WARN [master/6578523f4421:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:09,476 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-841780559_22 at /127.0.0.1:32940 [Receiving block BP-1791055191-172.17.0.2-1733806623406:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:44817:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32940 dst: /127.0.0.1:44817 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T04:57:09,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44817 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-10T04:57:09,485 WARN [master/6578523f4421:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T04:57:09,485 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/.tmp/hbase.id]:[hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/hbase.id] 2024-12-10T04:57:09,536 INFO [master/6578523f4421:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:09,540 INFO [master/6578523f4421:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-10T04:57:09,557 INFO [master/6578523f4421:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 2024-12-10T04:57:09,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:09,569 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45887-0x1000e15a10d0002, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:09,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38527-0x1000e15a10d0001, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:09,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:09,583 WARN [master/6578523f4421:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:09,583 WARN [master/6578523f4421:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:09,586 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-841780559_22 at /127.0.0.1:47364 [Receiving block BP-1791055191-172.17.0.2-1733806623406:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:43693:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47364 dst: /127.0.0.1:43693 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T04:57:09,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43693 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-10T04:57:09,591 WARN [master/6578523f4421:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-10T04:57:09,605 INFO [master/6578523f4421:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T04:57:09,607 INFO [master/6578523f4421:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-10T04:57:09,612 INFO [master/6578523f4421:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T04:57:09,636 WARN [master/6578523f4421:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:09,636 WARN [master/6578523f4421:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:09,639 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-841780559_22 at /127.0.0.1:47378 [Receiving block BP-1791055191-172.17.0.2-1733806623406:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:43693:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47378 dst: /127.0.0.1:43693 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T04:57:09,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43693 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-10T04:57:09,645 WARN [master/6578523f4421:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T04:57:09,661 INFO [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/data/master/store 2024-12-10T04:57:09,675 WARN [master/6578523f4421:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:09,675 WARN [master/6578523f4421:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:09,679 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-841780559_22 at /127.0.0.1:47396 [Receiving block BP-1791055191-172.17.0.2-1733806623406:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43693:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47396 dst: /127.0.0.1:43693 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T04:57:09,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43693 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-10T04:57:09,684 WARN [master/6578523f4421:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T04:57:09,688 INFO [master/6578523f4421:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-10T04:57:09,690 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T04:57:09,691 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T04:57:09,691 INFO [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T04:57:09,691 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T04:57:09,692 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-10T04:57:09,693 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T04:57:09,693 INFO [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T04:57:09,693 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733806629691Disabling compacts and flushes for region at 1733806629691Disabling writes for close at 1733806629692 (+1 ms)Writing region close event to WAL at 1733806629693 (+1 ms)Closed at 1733806629693 2024-12-10T04:57:09,695 WARN [master/6578523f4421:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/data/master/store/.initializing 2024-12-10T04:57:09,695 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/WALs/6578523f4421,32843,1733806628164 2024-12-10T04:57:09,703 INFO [master/6578523f4421:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T04:57:09,716 INFO [master/6578523f4421:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6578523f4421%2C32843%2C1733806628164, suffix=, logDir=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/WALs/6578523f4421,32843,1733806628164, archiveDir=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/oldWALs, maxLogs=10 2024-12-10T04:57:09,740 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/WALs/6578523f4421,32843,1733806628164/6578523f4421%2C32843%2C1733806628164.1733806629720, exclude list is [], retry=0 2024-12-10T04:57:09,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T04:57:09,759 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44817,DS-937ffc99-ba75-4f67-8823-bfaa08d3a371,DISK] 2024-12-10T04:57:09,759 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43693,DS-c7cfb6b1-21ac-4270-a3d9-06bcb373aca7,DISK] 2024-12-10T04:57:09,759 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45993,DS-0b8ed2a7-6605-4265-866d-1ebfe87d16ab,DISK] 2024-12-10T04:57:09,762 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-10T04:57:09,795 INFO [master/6578523f4421:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/WALs/6578523f4421,32843,1733806628164/6578523f4421%2C32843%2C1733806628164.1733806629720 2024-12-10T04:57:09,796 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39733:39733),(127.0.0.1/127.0.0.1:39471:39471),(127.0.0.1/127.0.0.1:39765:39765)] 2024-12-10T04:57:09,796 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-10T04:57:09,797 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T04:57:09,799 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:09,800 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:09,834 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:09,855 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-10T04:57:09,858 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:09,861 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T04:57:09,861 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:09,864 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-10T04:57:09,864 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:09,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T04:57:09,866 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:09,868 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-10T04:57:09,868 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:09,869 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T04:57:09,869 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:09,871 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-10T04:57:09,872 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:09,872 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T04:57:09,873 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:09,876 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:09,877 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:09,882 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:09,883 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:09,886 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T04:57:09,889 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:09,895 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T04:57:09,896 INFO [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60512572, jitterRate=-0.09829241037368774}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T04:57:09,901 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733806629810Initializing all the Stores at 1733806629812 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733806629812Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733806629813 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733806629813Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733806629813Cleaning up temporary data from old regions at 1733806629883 (+70 ms)Region opened successfully at 1733806629901 (+18 ms) 2024-12-10T04:57:09,903 INFO [master/6578523f4421:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-10T04:57:09,933 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cb7762, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6578523f4421/172.17.0.2:0 2024-12-10T04:57:09,959 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-10T04:57:09,968 INFO [master/6578523f4421:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-10T04:57:09,968 INFO [master/6578523f4421:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-10T04:57:09,970 INFO [master/6578523f4421:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-10T04:57:09,971 INFO [master/6578523f4421:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-10T04:57:09,975 INFO [master/6578523f4421:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 3 msec 2024-12-10T04:57:09,975 INFO [master/6578523f4421:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-10T04:57:09,997 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-10T04:57:10,004 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-10T04:57:10,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43693 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-10T04:57:10,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44817 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-10T04:57:10,104 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-10T04:57:10,107 INFO [master/6578523f4421:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-10T04:57:10,108 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-10T04:57:10,116 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-10T04:57:10,118 INFO [master/6578523f4421:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-10T04:57:10,122 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-10T04:57:10,126 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already 
deleted, retry=false 2024-12-10T04:57:10,128 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-10T04:57:10,137 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-10T04:57:10,153 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-10T04:57:10,158 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-10T04:57:10,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T04:57:10,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38527-0x1000e15a10d0001, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T04:57:10,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T04:57:10,169 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45887-0x1000e15a10d0002, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T04:57:10,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38527-0x1000e15a10d0001, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:10,169 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45887-0x1000e15a10d0002, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:10,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:10,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:10,175 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6578523f4421,32843,1733806628164, sessionid=0x1000e15a10d0000, setting cluster-up flag (Was=false) 2024-12-10T04:57:10,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:10,211 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45887-0x1000e15a10d0002, 
quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:10,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:10,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38527-0x1000e15a10d0001, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:10,242 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-10T04:57:10,246 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6578523f4421,32843,1733806628164 2024-12-10T04:57:10,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38527-0x1000e15a10d0001, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:10,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:10,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:10,274 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45887-0x1000e15a10d0002, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:10,305 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-10T04:57:10,308 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6578523f4421,32843,1733806628164 2024-12-10T04:57:10,316 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-10T04:57:10,379 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-10T04:57:10,386 INFO [RS:1;6578523f4421:45887 {}] regionserver.HRegionServer(746): ClusterId : 8129eecb-9fee-458b-a93b-7fd55d04eeb5 2024-12-10T04:57:10,386 INFO [RS:2;6578523f4421:37799 {}] regionserver.HRegionServer(746): ClusterId : 8129eecb-9fee-458b-a93b-7fd55d04eeb5 2024-12-10T04:57:10,386 INFO [RS:0;6578523f4421:38527 {}] regionserver.HRegionServer(746): ClusterId : 8129eecb-9fee-458b-a93b-7fd55d04eeb5 2024-12-10T04:57:10,388 INFO [master/6578523f4421:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 
2024-12-10T04:57:10,389 DEBUG [RS:2;6578523f4421:37799 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T04:57:10,389 DEBUG [RS:0;6578523f4421:38527 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T04:57:10,389 DEBUG [RS:1;6578523f4421:45887 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T04:57:10,394 INFO [master/6578523f4421:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-10T04:57:10,399 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6578523f4421,32843,1733806628164 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-10T04:57:10,402 DEBUG [RS:1;6578523f4421:45887 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T04:57:10,402 DEBUG [RS:2;6578523f4421:37799 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T04:57:10,402 DEBUG [RS:0;6578523f4421:38527 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T04:57:10,403 DEBUG [RS:1;6578523f4421:45887 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T04:57:10,403 DEBUG [RS:2;6578523f4421:37799 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T04:57:10,403 DEBUG [RS:0;6578523f4421:38527 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T04:57:10,412 DEBUG [RS:1;6578523f4421:45887 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T04:57:10,412 DEBUG [RS:0;6578523f4421:38527 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T04:57:10,412 DEBUG [RS:2;6578523f4421:37799 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T04:57:10,413 DEBUG [RS:2;6578523f4421:37799 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2051dc1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6578523f4421/172.17.0.2:0 2024-12-10T04:57:10,413 DEBUG [RS:1;6578523f4421:45887 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32dd49ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6578523f4421/172.17.0.2:0 
2024-12-10T04:57:10,413 DEBUG [RS:0;6578523f4421:38527 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c96d143, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6578523f4421/172.17.0.2:0 2024-12-10T04:57:10,413 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6578523f4421:0, corePoolSize=5, maxPoolSize=5 2024-12-10T04:57:10,413 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6578523f4421:0, corePoolSize=5, maxPoolSize=5 2024-12-10T04:57:10,414 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6578523f4421:0, corePoolSize=5, maxPoolSize=5 2024-12-10T04:57:10,414 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6578523f4421:0, corePoolSize=5, maxPoolSize=5 2024-12-10T04:57:10,414 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6578523f4421:0, corePoolSize=10, maxPoolSize=10 2024-12-10T04:57:10,414 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,414 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6578523f4421:0, corePoolSize=2, maxPoolSize=2 2024-12-10T04:57:10,414 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,421 INFO [master/6578523f4421:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733806660421 2024-12-10T04:57:10,423 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-10T04:57:10,424 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T04:57:10,424 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-10T04:57:10,424 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-10T04:57:10,428 DEBUG [RS:1;6578523f4421:45887 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;6578523f4421:45887 2024-12-10T04:57:10,429 DEBUG [RS:2;6578523f4421:37799 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;6578523f4421:37799 2024-12-10T04:57:10,429 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-10T04:57:10,430 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.CleanerChore(192): 
Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-10T04:57:10,430 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-10T04:57:10,430 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-10T04:57:10,431 DEBUG [RS:0;6578523f4421:38527 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6578523f4421:38527 2024-12-10T04:57:10,431 INFO [RS:2;6578523f4421:37799 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T04:57:10,431 INFO [RS:1;6578523f4421:45887 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T04:57:10,431 INFO [RS:0;6578523f4421:38527 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T04:57:10,431 INFO [RS:2;6578523f4421:37799 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T04:57:10,431 INFO [RS:0;6578523f4421:38527 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T04:57:10,431 INFO [RS:1;6578523f4421:45887 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T04:57:10,432 DEBUG [RS:0;6578523f4421:38527 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-10T04:57:10,432 DEBUG [RS:1;6578523f4421:45887 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-10T04:57:10,432 DEBUG [RS:2;6578523f4421:37799 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-10T04:57:10,432 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:10,433 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-10T04:57:10,434 INFO [RS:0;6578523f4421:38527 {}] regionserver.HRegionServer(2659): reportForDuty to master=6578523f4421,32843,1733806628164 with port=38527, startcode=1733806628988 2024-12-10T04:57:10,434 INFO [RS:1;6578523f4421:45887 {}] regionserver.HRegionServer(2659): reportForDuty to master=6578523f4421,32843,1733806628164 with port=45887, startcode=1733806629106 2024-12-10T04:57:10,433 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-10T04:57:10,436 INFO [RS:2;6578523f4421:37799 {}] regionserver.HRegionServer(2659): reportForDuty to master=6578523f4421,32843,1733806628164 with port=37799, startcode=1733806629213 2024-12-10T04:57:10,438 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-10T04:57:10,440 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-10T04:57:10,440 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-10T04:57:10,446 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-10T04:57:10,446 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-10T04:57:10,447 DEBUG [RS:2;6578523f4421:37799 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T04:57:10,447 DEBUG [RS:0;6578523f4421:38527 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T04:57:10,447 DEBUG [RS:1;6578523f4421:45887 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T04:57:10,448 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6578523f4421:0:becomeActiveMaster-HFileCleaner.large.0-1733806630448,5,FailOnTimeoutGroup] 2024-12-10T04:57:10,449 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:10,449 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:10,452 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6578523f4421:0:becomeActiveMaster-HFileCleaner.small.0-1733806630448,5,FailOnTimeoutGroup] 2024-12-10T04:57:10,452 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,452 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-10T04:57:10,453 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,454 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-10T04:57:10,457 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-841780559_22 at /127.0.0.1:45360 [Receiving block BP-1791055191-172.17.0.2-1733806623406:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:43693:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45360 dst: /127.0.0.1:43693 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T04:57:10,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43693 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-10T04:57:10,480 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55493, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T04:57:10,480 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46001, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T04:57:10,480 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41059, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T04:57:10,486 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32843 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6578523f4421,38527,1733806628988 2024-12-10T04:57:10,489 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32843 {}] master.ServerManager(517): Registering regionserver=6578523f4421,38527,1733806628988 2024-12-10T04:57:10,502 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32843 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6578523f4421,45887,1733806629106 2024-12-10T04:57:10,502 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32843 {}] master.ServerManager(517): Registering regionserver=6578523f4421,45887,1733806629106 2024-12-10T04:57:10,505 DEBUG [RS:0;6578523f4421:38527 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34 2024-12-10T04:57:10,506 DEBUG [RS:0;6578523f4421:38527 
{}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42739 2024-12-10T04:57:10,506 DEBUG [RS:0;6578523f4421:38527 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T04:57:10,507 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32843 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6578523f4421,37799,1733806629213 2024-12-10T04:57:10,507 DEBUG [RS:1;6578523f4421:45887 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34 2024-12-10T04:57:10,507 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32843 {}] master.ServerManager(517): Registering regionserver=6578523f4421,37799,1733806629213 2024-12-10T04:57:10,507 DEBUG [RS:1;6578523f4421:45887 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42739 2024-12-10T04:57:10,507 DEBUG [RS:1;6578523f4421:45887 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T04:57:10,510 DEBUG [RS:2;6578523f4421:37799 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34 2024-12-10T04:57:10,510 DEBUG [RS:2;6578523f4421:37799 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42739 2024-12-10T04:57:10,510 DEBUG [RS:2;6578523f4421:37799 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T04:57:10,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T04:57:10,591 DEBUG [RS:1;6578523f4421:45887 {}] zookeeper.ZKUtil(111): regionserver:45887-0x1000e15a10d0002, quorum=127.0.0.1:49472, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6578523f4421,45887,1733806629106 2024-12-10T04:57:10,591 DEBUG [RS:0;6578523f4421:38527 {}] zookeeper.ZKUtil(111): regionserver:38527-0x1000e15a10d0001, quorum=127.0.0.1:49472, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6578523f4421,38527,1733806628988 2024-12-10T04:57:10,591 WARN [RS:0;6578523f4421:38527 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T04:57:10,591 WARN [RS:1;6578523f4421:45887 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-10T04:57:10,592 INFO [RS:1;6578523f4421:45887 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T04:57:10,592 INFO [RS:0;6578523f4421:38527 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T04:57:10,593 DEBUG [RS:1;6578523f4421:45887 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/WALs/6578523f4421,45887,1733806629106 2024-12-10T04:57:10,593 DEBUG [RS:0;6578523f4421:38527 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/WALs/6578523f4421,38527,1733806628988 2024-12-10T04:57:10,594 DEBUG [RS:2;6578523f4421:37799 {}] zookeeper.ZKUtil(111): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6578523f4421,37799,1733806629213 2024-12-10T04:57:10,594 WARN [RS:2;6578523f4421:37799 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T04:57:10,595 INFO [RS:2;6578523f4421:37799 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T04:57:10,595 DEBUG [RS:2;6578523f4421:37799 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/WALs/6578523f4421,37799,1733806629213 2024-12-10T04:57:10,596 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6578523f4421,37799,1733806629213] 2024-12-10T04:57:10,596 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6578523f4421,45887,1733806629106] 2024-12-10T04:57:10,596 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6578523f4421,38527,1733806628988] 2024-12-10T04:57:10,618 INFO [RS:1;6578523f4421:45887 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T04:57:10,618 INFO [RS:2;6578523f4421:37799 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T04:57:10,618 INFO [RS:0;6578523f4421:38527 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T04:57:10,630 INFO [RS:1;6578523f4421:45887 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T04:57:10,630 INFO [RS:0;6578523f4421:38527 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T04:57:10,631 INFO [RS:2;6578523f4421:37799 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T04:57:10,635 INFO [RS:1;6578523f4421:45887 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T04:57:10,635 INFO [RS:2;6578523f4421:37799 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 
MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T04:57:10,635 INFO [RS:0;6578523f4421:38527 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T04:57:10,636 INFO [RS:1;6578523f4421:45887 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,636 INFO [RS:2;6578523f4421:37799 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,636 INFO [RS:0;6578523f4421:38527 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,637 INFO [RS:1;6578523f4421:45887 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T04:57:10,637 INFO [RS:0;6578523f4421:38527 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T04:57:10,638 INFO [RS:2;6578523f4421:37799 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T04:57:10,643 INFO [RS:1;6578523f4421:45887 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T04:57:10,643 INFO [RS:0;6578523f4421:38527 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T04:57:10,643 INFO [RS:2;6578523f4421:37799 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T04:57:10,645 INFO [RS:1;6578523f4421:45887 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,645 INFO [RS:2;6578523f4421:37799 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,645 INFO [RS:0;6578523f4421:38527 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-10T04:57:10,645 DEBUG [RS:1;6578523f4421:45887 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,645 DEBUG [RS:0;6578523f4421:38527 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,645 DEBUG [RS:2;6578523f4421:37799 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,646 DEBUG [RS:0;6578523f4421:38527 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,646 DEBUG [RS:1;6578523f4421:45887 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,646 DEBUG [RS:2;6578523f4421:37799 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,646 DEBUG [RS:0;6578523f4421:38527 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,646 DEBUG [RS:1;6578523f4421:45887 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,646 DEBUG [RS:2;6578523f4421:37799 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,646 DEBUG [RS:1;6578523f4421:45887 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,646 DEBUG [RS:0;6578523f4421:38527 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,646 DEBUG [RS:2;6578523f4421:37799 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,646 DEBUG [RS:1;6578523f4421:45887 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,646 DEBUG [RS:0;6578523f4421:38527 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,646 DEBUG [RS:2;6578523f4421:37799 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,646 DEBUG [RS:1;6578523f4421:45887 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6578523f4421:0, corePoolSize=2, maxPoolSize=2 2024-12-10T04:57:10,646 DEBUG [RS:0;6578523f4421:38527 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6578523f4421:0, corePoolSize=2, maxPoolSize=2 2024-12-10T04:57:10,646 DEBUG [RS:2;6578523f4421:37799 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6578523f4421:0, corePoolSize=2, maxPoolSize=2 2024-12-10T04:57:10,646 DEBUG 
[RS:1;6578523f4421:45887 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,646 DEBUG [RS:0;6578523f4421:38527 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,646 DEBUG [RS:2;6578523f4421:37799 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,647 DEBUG [RS:2;6578523f4421:37799 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,647 DEBUG [RS:0;6578523f4421:38527 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,647 DEBUG [RS:1;6578523f4421:45887 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,647 DEBUG [RS:2;6578523f4421:37799 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,647 DEBUG [RS:1;6578523f4421:45887 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,647 DEBUG [RS:0;6578523f4421:38527 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,647 DEBUG [RS:2;6578523f4421:37799 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,647 DEBUG [RS:0;6578523f4421:38527 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,647 DEBUG [RS:1;6578523f4421:45887 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,647 DEBUG [RS:0;6578523f4421:38527 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,647 DEBUG [RS:1;6578523f4421:45887 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,647 DEBUG [RS:2;6578523f4421:37799 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,647 DEBUG [RS:1;6578523f4421:45887 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,647 DEBUG [RS:0;6578523f4421:38527 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,647 DEBUG [RS:2;6578523f4421:37799 {}] executor.ExecutorService(95): Starting 
executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:10,647 DEBUG [RS:2;6578523f4421:37799 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6578523f4421:0, corePoolSize=3, maxPoolSize=3 2024-12-10T04:57:10,647 DEBUG [RS:0;6578523f4421:38527 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6578523f4421:0, corePoolSize=3, maxPoolSize=3 2024-12-10T04:57:10,647 DEBUG [RS:1;6578523f4421:45887 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6578523f4421:0, corePoolSize=3, maxPoolSize=3 2024-12-10T04:57:10,648 DEBUG [RS:1;6578523f4421:45887 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0, corePoolSize=3, maxPoolSize=3 2024-12-10T04:57:10,648 DEBUG [RS:2;6578523f4421:37799 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0, corePoolSize=3, maxPoolSize=3 2024-12-10T04:57:10,648 DEBUG [RS:0;6578523f4421:38527 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0, corePoolSize=3, maxPoolSize=3 2024-12-10T04:57:10,656 INFO [RS:0;6578523f4421:38527 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,656 INFO [RS:1;6578523f4421:45887 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,656 INFO [RS:2;6578523f4421:37799 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,656 INFO [RS:0;6578523f4421:38527 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,656 INFO [RS:1;6578523f4421:45887 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,656 INFO [RS:0;6578523f4421:38527 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,656 INFO [RS:2;6578523f4421:37799 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,656 INFO [RS:1;6578523f4421:45887 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,656 INFO [RS:0;6578523f4421:38527 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,656 INFO [RS:1;6578523f4421:45887 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,656 INFO [RS:0;6578523f4421:38527 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,656 INFO [RS:1;6578523f4421:45887 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
2024-12-10T04:57:10,657 INFO [RS:0;6578523f4421:38527 {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,38527,1733806628988-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T04:57:10,657 INFO [RS:1;6578523f4421:45887 {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,45887,1733806629106-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T04:57:10,657 INFO [RS:2;6578523f4421:37799 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,657 INFO [RS:2;6578523f4421:37799 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,657 INFO [RS:2;6578523f4421:37799 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,657 INFO [RS:2;6578523f4421:37799 {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,37799,1733806629213-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T04:57:10,674 INFO [RS:0;6578523f4421:38527 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T04:57:10,674 INFO [RS:2;6578523f4421:37799 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T04:57:10,676 INFO [RS:0;6578523f4421:38527 {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,38527,1733806628988-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,676 INFO [RS:2;6578523f4421:37799 {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,37799,1733806629213-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,676 INFO [RS:0;6578523f4421:38527 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,676 INFO [RS:2;6578523f4421:37799 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,676 INFO [RS:0;6578523f4421:38527 {}] regionserver.Replication(171): 6578523f4421,38527,1733806628988 started 2024-12-10T04:57:10,676 INFO [RS:2;6578523f4421:37799 {}] regionserver.Replication(171): 6578523f4421,37799,1733806629213 started 2024-12-10T04:57:10,679 INFO [RS:1;6578523f4421:45887 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T04:57:10,679 INFO [RS:1;6578523f4421:45887 {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,45887,1733806629106-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,679 INFO [RS:1;6578523f4421:45887 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,679 INFO [RS:1;6578523f4421:45887 {}] regionserver.Replication(171): 6578523f4421,45887,1733806629106 started 2024-12-10T04:57:10,692 INFO [RS:0;6578523f4421:38527 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-10T04:57:10,693 INFO [RS:0;6578523f4421:38527 {}] regionserver.HRegionServer(1482): Serving as 6578523f4421,38527,1733806628988, RpcServer on 6578523f4421/172.17.0.2:38527, sessionid=0x1000e15a10d0001 2024-12-10T04:57:10,693 INFO [RS:2;6578523f4421:37799 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,693 DEBUG [RS:0;6578523f4421:38527 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T04:57:10,693 INFO [RS:2;6578523f4421:37799 {}] regionserver.HRegionServer(1482): Serving as 6578523f4421,37799,1733806629213, RpcServer on 6578523f4421/172.17.0.2:37799, sessionid=0x1000e15a10d0003 2024-12-10T04:57:10,694 DEBUG [RS:0;6578523f4421:38527 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6578523f4421,38527,1733806628988 2024-12-10T04:57:10,694 DEBUG [RS:2;6578523f4421:37799 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T04:57:10,694 DEBUG [RS:0;6578523f4421:38527 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6578523f4421,38527,1733806628988' 2024-12-10T04:57:10,694 DEBUG [RS:2;6578523f4421:37799 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6578523f4421,37799,1733806629213 2024-12-10T04:57:10,694 DEBUG [RS:2;6578523f4421:37799 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6578523f4421,37799,1733806629213' 2024-12-10T04:57:10,694 DEBUG [RS:0;6578523f4421:38527 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T04:57:10,694 DEBUG [RS:2;6578523f4421:37799 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T04:57:10,695 DEBUG [RS:2;6578523f4421:37799 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T04:57:10,695 DEBUG [RS:0;6578523f4421:38527 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T04:57:10,697 DEBUG [RS:2;6578523f4421:37799 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T04:57:10,697 DEBUG [RS:2;6578523f4421:37799 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T04:57:10,697 DEBUG [RS:2;6578523f4421:37799 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6578523f4421,37799,1733806629213 2024-12-10T04:57:10,697 DEBUG [RS:2;6578523f4421:37799 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6578523f4421,37799,1733806629213' 2024-12-10T04:57:10,697 DEBUG [RS:2;6578523f4421:37799 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T04:57:10,698 DEBUG [RS:0;6578523f4421:38527 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T04:57:10,698 DEBUG [RS:0;6578523f4421:38527 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T04:57:10,698 DEBUG [RS:0;6578523f4421:38527 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6578523f4421,38527,1733806628988 2024-12-10T04:57:10,698 DEBUG [RS:0;6578523f4421:38527 {}] 
procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6578523f4421,38527,1733806628988' 2024-12-10T04:57:10,698 DEBUG [RS:0;6578523f4421:38527 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T04:57:10,698 DEBUG [RS:2;6578523f4421:37799 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T04:57:10,699 DEBUG [RS:0;6578523f4421:38527 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T04:57:10,699 DEBUG [RS:2;6578523f4421:37799 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T04:57:10,699 INFO [RS:2;6578523f4421:37799 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T04:57:10,699 INFO [RS:2;6578523f4421:37799 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T04:57:10,699 DEBUG [RS:0;6578523f4421:38527 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T04:57:10,699 INFO [RS:0;6578523f4421:38527 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T04:57:10,699 INFO [RS:0;6578523f4421:38527 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T04:57:10,700 INFO [RS:1;6578523f4421:45887 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:10,700 INFO [RS:1;6578523f4421:45887 {}] regionserver.HRegionServer(1482): Serving as 6578523f4421,45887,1733806629106, RpcServer on 6578523f4421/172.17.0.2:45887, sessionid=0x1000e15a10d0002 2024-12-10T04:57:10,700 DEBUG [RS:1;6578523f4421:45887 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T04:57:10,700 DEBUG [RS:1;6578523f4421:45887 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6578523f4421,45887,1733806629106 2024-12-10T04:57:10,700 DEBUG [RS:1;6578523f4421:45887 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6578523f4421,45887,1733806629106' 2024-12-10T04:57:10,701 DEBUG [RS:1;6578523f4421:45887 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T04:57:10,701 DEBUG [RS:1;6578523f4421:45887 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T04:57:10,702 DEBUG [RS:1;6578523f4421:45887 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T04:57:10,702 DEBUG [RS:1;6578523f4421:45887 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T04:57:10,702 DEBUG [RS:1;6578523f4421:45887 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6578523f4421,45887,1733806629106 2024-12-10T04:57:10,702 DEBUG [RS:1;6578523f4421:45887 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6578523f4421,45887,1733806629106' 2024-12-10T04:57:10,702 DEBUG [RS:1;6578523f4421:45887 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T04:57:10,703 DEBUG [RS:1;6578523f4421:45887 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new 
procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T04:57:10,703 DEBUG [RS:1;6578523f4421:45887 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T04:57:10,703 INFO [RS:1;6578523f4421:45887 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T04:57:10,704 INFO [RS:1;6578523f4421:45887 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T04:57:10,805 INFO [RS:2;6578523f4421:37799 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T04:57:10,805 INFO [RS:1;6578523f4421:45887 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T04:57:10,805 INFO [RS:0;6578523f4421:38527 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T04:57:10,809 INFO [RS:1;6578523f4421:45887 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6578523f4421%2C45887%2C1733806629106, suffix=, logDir=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/WALs/6578523f4421,45887,1733806629106, archiveDir=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/oldWALs, maxLogs=32 2024-12-10T04:57:10,810 INFO [RS:2;6578523f4421:37799 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6578523f4421%2C37799%2C1733806629213, suffix=, logDir=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/WALs/6578523f4421,37799,1733806629213, archiveDir=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/oldWALs, maxLogs=32 2024-12-10T04:57:10,810 INFO [RS:0;6578523f4421:38527 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6578523f4421%2C38527%2C1733806628988, suffix=, logDir=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/WALs/6578523f4421,38527,1733806628988, archiveDir=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/oldWALs, maxLogs=32 2024-12-10T04:57:10,825 DEBUG [RS:1;6578523f4421:45887 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/WALs/6578523f4421,45887,1733806629106/6578523f4421%2C45887%2C1733806629106.1733806630814, exclude list is [], retry=0 2024-12-10T04:57:10,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44817,DS-937ffc99-ba75-4f67-8823-bfaa08d3a371,DISK] 2024-12-10T04:57:10,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43693,DS-c7cfb6b1-21ac-4270-a3d9-06bcb373aca7,DISK] 2024-12-10T04:57:10,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45993,DS-0b8ed2a7-6605-4265-866d-1ebfe87d16ab,DISK] 2024-12-10T04:57:10,849 DEBUG [RS:0;6578523f4421:38527 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/WALs/6578523f4421,38527,1733806628988/6578523f4421%2C38527%2C1733806628988.1733806630814, exclude list is [], retry=0 2024-12-10T04:57:10,849 DEBUG [RS:2;6578523f4421:37799 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/WALs/6578523f4421,37799,1733806629213/6578523f4421%2C37799%2C1733806629213.1733806630814, exclude list is [], retry=0 2024-12-10T04:57:10,852 INFO [RS:1;6578523f4421:45887 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/WALs/6578523f4421,45887,1733806629106/6578523f4421%2C45887%2C1733806629106.1733806630814 2024-12-10T04:57:10,853 DEBUG [RS:1;6578523f4421:45887 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39765:39765),(127.0.0.1/127.0.0.1:39733:39733),(127.0.0.1/127.0.0.1:39471:39471)] 2024-12-10T04:57:10,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44817,DS-937ffc99-ba75-4f67-8823-bfaa08d3a371,DISK] 2024-12-10T04:57:10,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45993,DS-0b8ed2a7-6605-4265-866d-1ebfe87d16ab,DISK] 2024-12-10T04:57:10,856 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43693,DS-c7cfb6b1-21ac-4270-a3d9-06bcb373aca7,DISK] 2024-12-10T04:57:10,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44817,DS-937ffc99-ba75-4f67-8823-bfaa08d3a371,DISK] 2024-12-10T04:57:10,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45993,DS-0b8ed2a7-6605-4265-866d-1ebfe87d16ab,DISK] 2024-12-10T04:57:10,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43693,DS-c7cfb6b1-21ac-4270-a3d9-06bcb373aca7,DISK] 2024-12-10T04:57:10,864 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-10T04:57:10,864 INFO [RS:0;6578523f4421:38527 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/WALs/6578523f4421,38527,1733806628988/6578523f4421%2C38527%2C1733806628988.1733806630814 2024-12-10T04:57:10,864 INFO [RS:2;6578523f4421:37799 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/WALs/6578523f4421,37799,1733806629213/6578523f4421%2C37799%2C1733806629213.1733806630814 2024-12-10T04:57:10,865 DEBUG [RS:2;6578523f4421:37799 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39471:39471),(127.0.0.1/127.0.0.1:39765:39765),(127.0.0.1/127.0.0.1:39733:39733)] 2024-12-10T04:57:10,865 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-10T04:57:10,865 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34 2024-12-10T04:57:10,866 DEBUG [RS:0;6578523f4421:38527 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39733:39733),(127.0.0.1/127.0.0.1:39471:39471),(127.0.0.1/127.0.0.1:39765:39765)] 2024-12-10T04:57:10,873 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:10,873 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:10,878 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-841780559_22 at /127.0.0.1:45406 [Receiving block BP-1791055191-172.17.0.2-1733806623406:blk_-9223372036854775696_1017] {}] datanode.DataXceiver(331): 127.0.0.1:43693:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45406 dst: /127.0.0.1:43693 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T04:57:10,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43693 is added to blk_-9223372036854775696_1018 (size=32) 2024-12-10T04:57:10,885 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
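The DFSStripedOutputStream warnings and the DataXceiver "Premature EOF" error above are the expected symptom of writing under the RS-3-2-1024k erasure coding policy on a mini cluster with only three DataNodes: an RS(3,2) block group needs five distinct locations (three data, two parity), so the parity blocks at index 3 and 4 cannot be placed and the writer reports two failed blocks. The log itself names the cluster-wide CLI check, 'hdfs ec -verifyClusterSetup'. Below is a hypothetical Java sketch (not part of this test) that inspects the policy on the test-data directory through the HDFS client API; the NameNode address and path are copied from the log, while the class name is invented.

// Hypothetical sketch, not from the test source: query the erasure coding policy
// of the test-data directory. NameNode address and path come from the log above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class CheckEcPolicy {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:42739"); // NameNode seen in the log
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path dir = new Path("/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34");
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir); // null means plain replication
      // RS-3-2-1024k stripes each block group into 3 data + 2 parity blocks and
      // therefore wants at least 5 DataNodes; this mini cluster runs 3, which is
      // why parity blocks index=3 and index=4 cannot be allocated above.
      System.out.println(dir + " -> " + (policy == null ? "replication" : policy.getName()));
    }
  }
}

Cluster-wide, 'hdfs ec -verifyClusterSetup' performs the topology check the warning recommends.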
2024-12-10T04:57:10,886 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T04:57:10,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T04:57:10,891 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T04:57:10,891 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:10,892 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T04:57:10,892 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-10T04:57:10,894 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-10T04:57:10,894 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:10,895 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T04:57:10,895 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T04:57:10,897 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T04:57:10,897 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:10,897 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T04:57:10,898 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T04:57:10,900 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T04:57:10,900 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:10,901 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T04:57:10,901 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-10T04:57:10,902 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740 2024-12-10T04:57:10,903 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740 2024-12-10T04:57:10,905 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal 
replay for 1588230740 2024-12-10T04:57:10,905 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-10T04:57:10,906 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T04:57:10,908 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-10T04:57:10,913 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T04:57:10,914 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67881015, jitterRate=0.011505946516990662}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T04:57:10,916 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733806630886Initializing all the Stores at 1733806630888 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733806630888Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733806630888Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733806630888Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733806630888Cleaning up temporary data from old regions at 1733806630905 (+17 ms)Region opened successfully at 1733806630916 (+11 ms) 2024-12-10T04:57:10,916 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-10T04:57:10,916 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-10T04:57:10,916 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-10T04:57:10,917 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T04:57:10,917 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region 
hbase:meta,,1.1588230740 2024-12-10T04:57:10,918 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-10T04:57:10,918 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733806630916Disabling compacts and flushes for region at 1733806630916Disabling writes for close at 1733806630917 (+1 ms)Writing region close event to WAL at 1733806630918 (+1 ms)Closed at 1733806630918 2024-12-10T04:57:10,921 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T04:57:10,921 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-10T04:57:10,926 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-10T04:57:10,934 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-10T04:57:10,937 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-10T04:57:11,090 DEBUG [6578523f4421:32843 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-10T04:57:11,096 DEBUG [6578523f4421:32843 {}] balancer.BalancerClusterState(204): Hosts are {6578523f4421=0} racks are {/default-rack=0} 2024-12-10T04:57:11,103 DEBUG [6578523f4421:32843 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-10T04:57:11,103 DEBUG [6578523f4421:32843 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-10T04:57:11,103 DEBUG [6578523f4421:32843 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-10T04:57:11,103 DEBUG [6578523f4421:32843 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-10T04:57:11,103 DEBUG [6578523f4421:32843 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-10T04:57:11,103 DEBUG [6578523f4421:32843 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-10T04:57:11,103 INFO [6578523f4421:32843 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-10T04:57:11,103 INFO [6578523f4421:32843 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-10T04:57:11,104 INFO [6578523f4421:32843 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-10T04:57:11,104 DEBUG [6578523f4421:32843 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T04:57:11,110 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6578523f4421,45887,1733806629106 2024-12-10T04:57:11,116 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6578523f4421,45887,1733806629106, state=OPENING 2024-12-10T04:57:11,167 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't 
exist, create it 2024-12-10T04:57:11,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38527-0x1000e15a10d0001, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:11,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:11,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:11,180 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45887-0x1000e15a10d0002, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:11,182 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T04:57:11,182 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T04:57:11,182 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T04:57:11,184 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T04:57:11,186 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-10T04:57:11,189 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6578523f4421,45887,1733806629106}] 2024-12-10T04:57:11,369 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T04:57:11,391 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33027, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T04:57:11,403 INFO [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-10T04:57:11,404 INFO [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T04:57:11,405 INFO [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-10T04:57:11,408 INFO [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6578523f4421%2C45887%2C1733806629106.meta, suffix=.meta, logDir=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/WALs/6578523f4421,45887,1733806629106, 
archiveDir=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/oldWALs, maxLogs=32 2024-12-10T04:57:11,424 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/WALs/6578523f4421,45887,1733806629106/6578523f4421%2C45887%2C1733806629106.meta.1733806631409.meta, exclude list is [], retry=0 2024-12-10T04:57:11,429 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43693,DS-c7cfb6b1-21ac-4270-a3d9-06bcb373aca7,DISK] 2024-12-10T04:57:11,429 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44817,DS-937ffc99-ba75-4f67-8823-bfaa08d3a371,DISK] 2024-12-10T04:57:11,429 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45993,DS-0b8ed2a7-6605-4265-866d-1ebfe87d16ab,DISK] 2024-12-10T04:57:11,432 INFO [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/WALs/6578523f4421,45887,1733806629106/6578523f4421%2C45887%2C1733806629106.meta.1733806631409.meta 2024-12-10T04:57:11,432 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39733:39733),(127.0.0.1/127.0.0.1:39471:39471),(127.0.0.1/127.0.0.1:39765:39765)] 2024-12-10T04:57:11,432 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-10T04:57:11,434 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-10T04:57:11,436 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-10T04:57:11,440 INFO [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
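The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" entry above, together with the WALFactory line instantiating AsyncFSWALProvider, maps onto the stock WAL settings. The minimal sketch below assumes the standard property names (the log only shows the resulting values, not how they were set); the values mirror the logged configuration and the class name is invented.

// Minimal sketch, assuming the stock HBase property names; only the logged
// values (256 MB block size, 128 MB roll size, maxLogs=32, asyncfs provider)
// are known to apply to this cluster.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalSettingsSketch {
  public static Configuration walConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "asyncfs");                              // AsyncFSWALProvider
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);  // blocksize=256 MB
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);           // rollsize = 0.5 * blocksize = 128 MB
    conf.setInt("hbase.regionserver.maxlogs", 32);                          // maxLogs=32
    return conf;
  }
}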
2024-12-10T04:57:11,443 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-10T04:57:11,443 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T04:57:11,443 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-10T04:57:11,444 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-10T04:57:11,446 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T04:57:11,447 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T04:57:11,447 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:11,448 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T04:57:11,448 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-10T04:57:11,450 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-10T04:57:11,450 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:11,451 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T04:57:11,451 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T04:57:11,452 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T04:57:11,452 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:11,453 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T04:57:11,453 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T04:57:11,454 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T04:57:11,454 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:11,455 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
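Every CompactionConfiguration dump above (minCompactSize:128 MB, files [3, 10), ratio 1.2, off-peak ratio 5.0, major period 604800000 with jitter 0.5) is the per-store echo of the usual compaction settings. The sketch below lists the corresponding configuration keys, on the assumption that these are defaults rather than values the test sets; the class name is invented.

// Hypothetical sketch of the configuration keys behind the CompactionConfiguration
// values echoed above; the numbers are simply the logged (default) values.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionDefaultsSketch {
  public static Configuration compactionConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize:128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact:3
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact:10
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio 1.200000
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio 5.000000
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // major period (7 days)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter 0.500000
    return conf;
  }
}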
2024-12-10T04:57:11,455 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-10T04:57:11,456 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740 2024-12-10T04:57:11,458 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740 2024-12-10T04:57:11,461 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-10T04:57:11,461 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-10T04:57:11,461 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T04:57:11,463 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-10T04:57:11,465 INFO [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70053817, jitterRate=0.04388321936130524}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T04:57:11,465 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-10T04:57:11,466 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733806631444Writing region info on filesystem at 1733806631444Initializing all the Stores at 1733806631446 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733806631446Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733806631446Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733806631446Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733806631446Cleaning up temporary data from old regions at 1733806631461 (+15 ms)Running coprocessor post-open hooks at 1733806631465 (+4 ms)Region opened successfully at 1733806631466 (+1 ms) 2024-12-10T04:57:11,475 INFO [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733806631361 2024-12-10T04:57:11,485 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-10T04:57:11,486 INFO [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-10T04:57:11,487 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6578523f4421,45887,1733806629106 2024-12-10T04:57:11,489 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6578523f4421,45887,1733806629106, state=OPEN 2024-12-10T04:57:11,495 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45887-0x1000e15a10d0002, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T04:57:11,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T04:57:11,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T04:57:11,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38527-0x1000e15a10d0001, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T04:57:11,495 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T04:57:11,495 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T04:57:11,495 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T04:57:11,495 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T04:57:11,495 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=6578523f4421,45887,1733806629106 2024-12-10T04:57:11,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-10T04:57:11,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6578523f4421,45887,1733806629106 in 306 msec 2024-12-10T04:57:11,510 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-10T04:57:11,510 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 577 msec 2024-12-10T04:57:11,512 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T04:57:11,513 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-10T04:57:11,529 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-10T04:57:11,530 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6578523f4421,45887,1733806629106, seqNum=-1] 2024-12-10T04:57:11,546 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T04:57:11,548 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42675, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T04:57:11,566 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2210 sec 2024-12-10T04:57:11,566 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733806631566, completionTime=-1 2024-12-10T04:57:11,568 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-10T04:57:11,569 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-10T04:57:11,589 INFO [master/6578523f4421:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-10T04:57:11,589 INFO [master/6578523f4421:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733806691589 2024-12-10T04:57:11,589 INFO [master/6578523f4421:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733806751589 2024-12-10T04:57:11,589 INFO [master/6578523f4421:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 20 msec 2024-12-10T04:57:11,590 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-10T04:57:11,596 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,32843,1733806628164-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:11,597 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,32843,1733806628164-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:11,597 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,32843,1733806628164-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:11,598 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6578523f4421:32843, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:11,599 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:11,599 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:11,604 DEBUG [master/6578523f4421:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-10T04:57:11,627 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.244sec 2024-12-10T04:57:11,629 INFO [master/6578523f4421:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-10T04:57:11,630 INFO [master/6578523f4421:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-10T04:57:11,631 INFO [master/6578523f4421:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-10T04:57:11,631 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-10T04:57:11,631 INFO [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-10T04:57:11,632 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,32843,1733806628164-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T04:57:11,632 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,32843,1733806628164-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-10T04:57:11,637 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-10T04:57:11,638 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-10T04:57:11,639 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,32843,1733806628164-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:11,702 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ad5c43f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T04:57:11,706 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-10T04:57:11,706 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-10T04:57:11,710 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6578523f4421,32843,-1 for getting cluster id 2024-12-10T04:57:11,713 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-10T04:57:11,722 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8129eecb-9fee-458b-a93b-7fd55d04eeb5' 2024-12-10T04:57:11,723 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-10T04:57:11,724 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8129eecb-9fee-458b-a93b-7fd55d04eeb5" 2024-12-10T04:57:11,724 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54300a5d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T04:57:11,724 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6578523f4421,32843,-1] 2024-12-10T04:57:11,727 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-10T04:57:11,729 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T04:57:11,730 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33092, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-12-10T04:57:11,733 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b1ff74e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T04:57:11,733 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-10T04:57:11,740 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6578523f4421,45887,1733806629106, seqNum=-1] 2024-12-10T04:57:11,741 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T04:57:11,743 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34432, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T04:57:11,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6578523f4421,32843,1733806628164 2024-12-10T04:57:11,767 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-10T04:57:11,772 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 6578523f4421,32843,1733806628164 2024-12-10T04:57:11,774 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@42f1365a 2024-12-10T04:57:11,775 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T04:57:11,777 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33106, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T04:57:11,783 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32843 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T04:57:11,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32843 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-10T04:57:11,793 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T04:57:11,795 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32843 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-10T04:57:11,795 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:11,798 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T04:57:11,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T04:57:11,808 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:11,808 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:11,811 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-841780559_22 at /127.0.0.1:45458 [Receiving block BP-1791055191-172.17.0.2-1733806623406:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:43693:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45458 dst: /127.0.0.1:43693 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T04:57:11,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43693 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-10T04:57:11,817 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
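The create request logged above, create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', ...}, is the shell-style rendering of the submitted table descriptor. A client-side equivalent using the standard Admin API is sketched below as an illustration, not the test's own code. Submitting it is what produces the pid=4 CreateTableProcedure, and the repeated "Checking to see if procedure is done pid=4" entries are the blocking client polling for that procedure to finish.

// Illustrative client-side equivalent of the logged create request, built with
// the standard Admin API; not code taken from TestHBaseWalOnEC itself.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
          .setRegionReplication(1)                                 // REGION_REPLICATION => '1'
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")) // NAME => 'cf', defaults otherwise
          .build();
      admin.createTable(td); // blocks until the master's CreateTableProcedure completes
    }
  }
}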
2024-12-10T04:57:11,819 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5455a92b09e90219f0f34a7bd78e4528, NAME => 'TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34 2024-12-10T04:57:11,826 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:11,827 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:11,829 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-841780559_22 at /127.0.0.1:45464 [Receiving block BP-1791055191-172.17.0.2-1733806623406:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:43693:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45464 dst: /127.0.0.1:43693 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T04:57:11,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43693 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-10T04:57:11,836 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-10T04:57:11,837 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T04:57:11,837 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 5455a92b09e90219f0f34a7bd78e4528, disabling compactions & flushes 2024-12-10T04:57:11,837 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528. 2024-12-10T04:57:11,837 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528. 2024-12-10T04:57:11,837 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528. after waiting 0 ms 2024-12-10T04:57:11,837 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528. 2024-12-10T04:57:11,837 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528. 2024-12-10T04:57:11,837 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5455a92b09e90219f0f34a7bd78e4528: Waiting for close lock at 1733806631837Disabling compacts and flushes for region at 1733806631837Disabling writes for close at 1733806631837Writing region close event to WAL at 1733806631837Closed at 1733806631837 2024-12-10T04:57:11,840 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T04:57:11,844 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733806631840"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733806631840"}]},"ts":"1733806631840"} 2024-12-10T04:57:11,849 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-10T04:57:11,851 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T04:57:11,854 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733806631851"}]},"ts":"1733806631851"} 2024-12-10T04:57:11,859 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-10T04:57:11,859 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {6578523f4421=0} racks are {/default-rack=0} 2024-12-10T04:57:11,861 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-10T04:57:11,861 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-10T04:57:11,861 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-10T04:57:11,861 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-10T04:57:11,861 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-10T04:57:11,861 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-10T04:57:11,861 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-10T04:57:11,861 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-10T04:57:11,861 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-10T04:57:11,861 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T04:57:11,863 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5455a92b09e90219f0f34a7bd78e4528, ASSIGN}] 2024-12-10T04:57:11,865 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5455a92b09e90219f0f34a7bd78e4528, ASSIGN 2024-12-10T04:57:11,867 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5455a92b09e90219f0f34a7bd78e4528, ASSIGN; state=OFFLINE, location=6578523f4421,45887,1733806629106; forceNewPlan=false, retain=false 2024-12-10T04:57:11,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T04:57:12,025 INFO [6578523f4421:32843 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-10T04:57:12,026 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5455a92b09e90219f0f34a7bd78e4528, regionState=OPENING, regionLocation=6578523f4421,45887,1733806629106 2024-12-10T04:57:12,032 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5455a92b09e90219f0f34a7bd78e4528, ASSIGN because future has completed 2024-12-10T04:57:12,033 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5455a92b09e90219f0f34a7bd78e4528, server=6578523f4421,45887,1733806629106}] 2024-12-10T04:57:12,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T04:57:12,195 INFO [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528. 2024-12-10T04:57:12,196 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 5455a92b09e90219f0f34a7bd78e4528, NAME => 'TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528.', STARTKEY => '', ENDKEY => ''} 2024-12-10T04:57:12,196 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 5455a92b09e90219f0f34a7bd78e4528 2024-12-10T04:57:12,196 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T04:57:12,197 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 5455a92b09e90219f0f34a7bd78e4528 2024-12-10T04:57:12,197 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 5455a92b09e90219f0f34a7bd78e4528 2024-12-10T04:57:12,201 INFO [StoreOpener-5455a92b09e90219f0f34a7bd78e4528-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5455a92b09e90219f0f34a7bd78e4528 2024-12-10T04:57:12,203 INFO [StoreOpener-5455a92b09e90219f0f34a7bd78e4528-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5455a92b09e90219f0f34a7bd78e4528 columnFamilyName cf 2024-12-10T04:57:12,203 DEBUG [StoreOpener-5455a92b09e90219f0f34a7bd78e4528-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:12,204 INFO [StoreOpener-5455a92b09e90219f0f34a7bd78e4528-1 {}] regionserver.HStore(327): Store=5455a92b09e90219f0f34a7bd78e4528/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T04:57:12,204 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 5455a92b09e90219f0f34a7bd78e4528 2024-12-10T04:57:12,206 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/default/TestHBaseWalOnEC/5455a92b09e90219f0f34a7bd78e4528 2024-12-10T04:57:12,206 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/default/TestHBaseWalOnEC/5455a92b09e90219f0f34a7bd78e4528 2024-12-10T04:57:12,207 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 5455a92b09e90219f0f34a7bd78e4528 2024-12-10T04:57:12,207 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 5455a92b09e90219f0f34a7bd78e4528 2024-12-10T04:57:12,209 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 5455a92b09e90219f0f34a7bd78e4528 2024-12-10T04:57:12,214 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/default/TestHBaseWalOnEC/5455a92b09e90219f0f34a7bd78e4528/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T04:57:12,215 INFO [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 5455a92b09e90219f0f34a7bd78e4528; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70434323, jitterRate=0.04955320060253143}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T04:57:12,215 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5455a92b09e90219f0f34a7bd78e4528 2024-12-10T04:57:12,216 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 5455a92b09e90219f0f34a7bd78e4528: Running coprocessor pre-open hook at 1733806632197Writing region info on filesystem at 1733806632197Initializing all the Stores at 1733806632200 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733806632200Cleaning up temporary data from old regions at 1733806632207 (+7 ms)Running coprocessor post-open hooks at 1733806632215 (+8 ms)Region opened successfully at 1733806632216 (+1 ms) 2024-12-10T04:57:12,218 INFO [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528., pid=6, masterSystemTime=1733806632187 2024-12-10T04:57:12,221 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528. 2024-12-10T04:57:12,222 INFO [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528. 2024-12-10T04:57:12,223 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5455a92b09e90219f0f34a7bd78e4528, regionState=OPEN, openSeqNum=2, regionLocation=6578523f4421,45887,1733806629106 2024-12-10T04:57:12,227 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5455a92b09e90219f0f34a7bd78e4528, server=6578523f4421,45887,1733806629106 because future has completed 2024-12-10T04:57:12,239 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-10T04:57:12,239 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 5455a92b09e90219f0f34a7bd78e4528, server=6578523f4421,45887,1733806629106 in 200 msec 2024-12-10T04:57:12,243 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-10T04:57:12,244 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=5455a92b09e90219f0f34a7bd78e4528, ASSIGN in 376 msec 2024-12-10T04:57:12,245 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T04:57:12,246 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733806632245"}]},"ts":"1733806632245"} 2024-12-10T04:57:12,249 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-10T04:57:12,251 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T04:57:12,255 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 465 msec 
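For readers reconstructing this step outside the test harness: the CreateTableProcedure that just finished materializes the descriptor logged at region-creation time (a single 'cf' family, VERSIONS => '1', REGION_REPLICATION => '1', everything else default). A minimal sketch of the equivalent client-side call with the standard HBase Admin API follows; the test itself goes through HBaseTestingUtil, so the class name and variable names here are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Single 'cf' family with one version and one region replica,
      // mirroring the descriptor logged by HRegion(7572) above.
      TableDescriptorBuilder td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
          .setRegionReplication(1)
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build());
      admin.createTable(td.build());
    }
  }
}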
2024-12-10T04:57:12,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T04:57:12,431 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-10T04:57:12,431 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-10T04:57:12,436 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T04:57:12,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-10T04:57:12,443 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T04:57:12,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-10T04:57:12,454 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528., hostname=6578523f4421,45887,1733806629106, seqNum=2] 2024-12-10T04:57:12,466 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32843 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-10T04:57:12,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32843 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-10T04:57:12,475 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-10T04:57:12,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T04:57:12,478 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T04:57:12,480 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T04:57:12,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T04:57:12,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45887 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-10T04:57:12,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528. 
2024-12-10T04:57:12,653 INFO [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 5455a92b09e90219f0f34a7bd78e4528 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-10T04:57:12,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/default/TestHBaseWalOnEC/5455a92b09e90219f0f34a7bd78e4528/.tmp/cf/4beb369244914588a959df997fb1de9d is 36, key is row/cf:cq/1733806632457/Put/seqid=0 2024-12-10T04:57:12,705 WARN [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:12,705 WARN [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:12,709 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2023266466_22 at /127.0.0.1:47830 [Receiving block BP-1791055191-172.17.0.2-1733806623406:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:44817:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47830 dst: /127.0.0.1:44817 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T04:57:12,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44817 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-10T04:57:12,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T04:57:13,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-10T04:57:13,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44817 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-10T04:57:13,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44817 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-10T04:57:13,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-10T04:57:13,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T04:57:13,115 WARN [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T04:57:13,115 INFO [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/default/TestHBaseWalOnEC/5455a92b09e90219f0f34a7bd78e4528/.tmp/cf/4beb369244914588a959df997fb1de9d 2024-12-10T04:57:13,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43693 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-10T04:57:13,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-10T04:57:13,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/default/TestHBaseWalOnEC/5455a92b09e90219f0f34a7bd78e4528/.tmp/cf/4beb369244914588a959df997fb1de9d as hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/default/TestHBaseWalOnEC/5455a92b09e90219f0f34a7bd78e4528/cf/4beb369244914588a959df997fb1de9d 2024-12-10T04:57:13,167 INFO [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/default/TestHBaseWalOnEC/5455a92b09e90219f0f34a7bd78e4528/cf/4beb369244914588a959df997fb1de9d, entries=1, sequenceid=5, filesize=4.7 K 2024-12-10T04:57:13,173 INFO [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 5455a92b09e90219f0f34a7bd78e4528 in 521ms, sequenceid=5, compaction requested=false 2024-12-10T04:57:13,174 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-10T04:57:13,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 5455a92b09e90219f0f34a7bd78e4528: 2024-12-10T04:57:13,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528. 2024-12-10T04:57:13,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-10T04:57:13,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32843 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-10T04:57:13,184 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-10T04:57:13,184 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 701 msec 2024-12-10T04:57:13,188 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 717 msec 2024-12-10T04:57:13,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T04:57:13,609 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-10T04:57:13,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-10T04:57:13,628 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
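The flush that just completed (pid=7/pid=8) was requested by the client right after writing a single cell, row/cf:cq with 32 bytes of data, as seen in the HFileWriterImpl line above. A minimal sketch of that client-side sequence with the standard Table and Admin APIs is shown below; the value bytes and the class name are stand-ins, since the actual test code is not part of this log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlush {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(name);
         Admin admin = conn.getAdmin()) {
      // The flushed cell in the log is row/cf:cq; the value here is a placeholder.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      // Corresponds to the FlushTableProcedure/FlushRegionProcedure chain seen as pid=7/pid=8.
      admin.flush(name);
    }
  }
}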
2024-12-10T04:57:13,628 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T04:57:13,631 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
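The call stack above is the test's tearDown at TestHBaseWalOnEC.java:101 driving HBaseTestingUtil.shutdownMiniCluster(), which closes the shared async connection before stopping the master and region servers. A rough sketch of that teardown hook follows; the field layout and the @After annotation are assumptions inferred from the JUnit RunAfters frame, and only the shutdownMiniCluster() call itself is confirmed by the stack trace.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class TearDownSketch {
  // In the real test this would be the same util instance that started the mini cluster.
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Mirrors the logged chain: shutdownMiniCluster() -> shutdownMiniHBaseCluster()
    // -> cleanup()/closeConnection(), after which the HDFS and ZK mini clusters stop.
    UTIL.shutdownMiniCluster();
  }
}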
2024-12-10T04:57:13,632 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T04:57:13,632 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-10T04:57:13,632 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-10T04:57:13,632 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2134490824, stopped=false 2024-12-10T04:57:13,632 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6578523f4421,32843,1733806628164 2024-12-10T04:57:13,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38527-0x1000e15a10d0001, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T04:57:13,672 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45887-0x1000e15a10d0002, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T04:57:13,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T04:57:13,672 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45887-0x1000e15a10d0002, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:13,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38527-0x1000e15a10d0001, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:13,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T04:57:13,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:13,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:13,673 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-10T04:57:13,673 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-10T04:57:13,674 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45887-0x1000e15a10d0002, quorum=127.0.0.1:49472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T04:57:13,674 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38527-0x1000e15a10d0001, quorum=127.0.0.1:49472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T04:57:13,674 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T04:57:13,675 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T04:57:13,675 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at 
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T04:57:13,675 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T04:57:13,676 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6578523f4421,38527,1733806628988' ***** 2024-12-10T04:57:13,676 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T04:57:13,677 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6578523f4421,45887,1733806629106' ***** 2024-12-10T04:57:13,677 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T04:57:13,677 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6578523f4421,37799,1733806629213' ***** 2024-12-10T04:57:13,677 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T04:57:13,677 INFO [RS:0;6578523f4421:38527 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T04:57:13,677 INFO [RS:1;6578523f4421:45887 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T04:57:13,678 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T04:57:13,678 INFO [RS:1;6578523f4421:45887 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T04:57:13,678 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T04:57:13,678 INFO [RS:1;6578523f4421:45887 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T04:57:13,678 INFO [RS:0;6578523f4421:38527 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T04:57:13,678 INFO [RS:0;6578523f4421:38527 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-10T04:57:13,678 INFO [RS:0;6578523f4421:38527 {}] regionserver.HRegionServer(959): stopping server 6578523f4421,38527,1733806628988 2024-12-10T04:57:13,678 INFO [RS:1;6578523f4421:45887 {}] regionserver.HRegionServer(3091): Received CLOSE for 5455a92b09e90219f0f34a7bd78e4528 2024-12-10T04:57:13,679 INFO [RS:0;6578523f4421:38527 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T04:57:13,679 INFO [RS:0;6578523f4421:38527 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6578523f4421:38527. 2024-12-10T04:57:13,679 DEBUG [RS:0;6578523f4421:38527 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T04:57:13,679 DEBUG [RS:0;6578523f4421:38527 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T04:57:13,679 INFO [RS:1;6578523f4421:45887 {}] regionserver.HRegionServer(959): stopping server 6578523f4421,45887,1733806629106 2024-12-10T04:57:13,679 INFO [RS:1;6578523f4421:45887 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T04:57:13,679 INFO [RS:0;6578523f4421:38527 {}] regionserver.HRegionServer(976): stopping server 6578523f4421,38527,1733806628988; all regions closed. 2024-12-10T04:57:13,679 INFO [RS:1;6578523f4421:45887 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;6578523f4421:45887. 
2024-12-10T04:57:13,680 DEBUG [RS:1;6578523f4421:45887 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T04:57:13,680 DEBUG [RS:1;6578523f4421:45887 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T04:57:13,680 INFO [RS:1;6578523f4421:45887 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T04:57:13,680 INFO [RS:1;6578523f4421:45887 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T04:57:13,680 INFO [RS:1;6578523f4421:45887 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T04:57:13,680 INFO [RS:1;6578523f4421:45887 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-10T04:57:13,680 DEBUG [RS_CLOSE_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5455a92b09e90219f0f34a7bd78e4528, disabling compactions & flushes 2024-12-10T04:57:13,680 INFO [RS_CLOSE_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528. 2024-12-10T04:57:13,680 DEBUG [RS_CLOSE_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528. 2024-12-10T04:57:13,680 INFO [RS:2;6578523f4421:37799 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T04:57:13,680 DEBUG [RS_CLOSE_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528. after waiting 0 ms 2024-12-10T04:57:13,681 DEBUG [RS_CLOSE_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528. 
2024-12-10T04:57:13,681 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T04:57:13,681 INFO [RS:1;6578523f4421:45887 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-10T04:57:13,681 DEBUG [RS:1;6578523f4421:45887 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 5455a92b09e90219f0f34a7bd78e4528=TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528.} 2024-12-10T04:57:13,681 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-10T04:57:13,681 INFO [RS:2;6578523f4421:37799 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T04:57:13,681 INFO [RS:2;6578523f4421:37799 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T04:57:13,681 INFO [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-10T04:57:13,681 INFO [RS:2;6578523f4421:37799 {}] regionserver.HRegionServer(959): stopping server 6578523f4421,37799,1733806629213 2024-12-10T04:57:13,681 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-10T04:57:13,681 INFO [RS:2;6578523f4421:37799 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T04:57:13,681 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T04:57:13,681 INFO [RS:2;6578523f4421:37799 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;6578523f4421:37799. 
2024-12-10T04:57:13,681 DEBUG [RS:1;6578523f4421:45887 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 5455a92b09e90219f0f34a7bd78e4528 2024-12-10T04:57:13,681 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T04:57:13,681 DEBUG [RS:2;6578523f4421:37799 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T04:57:13,681 DEBUG [RS:2;6578523f4421:37799 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T04:57:13,681 INFO [RS:2;6578523f4421:37799 {}] regionserver.HRegionServer(976): stopping server 6578523f4421,37799,1733806629213; all regions closed. 
2024-12-10T04:57:13,682 INFO [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-10T04:57:13,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44817 is added to blk_1073741827_1015 (size=93) 2024-12-10T04:57:13,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44817 is added to blk_1073741828_1016 (size=93) 2024-12-10T04:57:13,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43693 is added to blk_1073741827_1015 (size=93) 2024-12-10T04:57:13,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43693 is added to blk_1073741828_1016 (size=93) 2024-12-10T04:57:13,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741827_1015 (size=93) 2024-12-10T04:57:13,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741828_1016 (size=93) 2024-12-10T04:57:13,693 DEBUG [RS:2;6578523f4421:37799 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/oldWALs 2024-12-10T04:57:13,693 DEBUG [RS:0;6578523f4421:38527 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/oldWALs 2024-12-10T04:57:13,693 INFO [RS:0;6578523f4421:38527 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 6578523f4421%2C38527%2C1733806628988:(num 1733806630814) 2024-12-10T04:57:13,693 INFO [RS:2;6578523f4421:37799 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 6578523f4421%2C37799%2C1733806629213:(num 1733806630814) 2024-12-10T04:57:13,693 DEBUG [RS:2;6578523f4421:37799 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T04:57:13,693 DEBUG [RS:0;6578523f4421:38527 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T04:57:13,693 INFO [RS:2;6578523f4421:37799 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T04:57:13,693 INFO [RS:0;6578523f4421:38527 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T04:57:13,694 INFO [RS:0;6578523f4421:38527 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T04:57:13,694 INFO [RS:2;6578523f4421:37799 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T04:57:13,694 INFO [RS:2;6578523f4421:37799 {}] hbase.ChoreService(370): Chore service for: regionserver/6578523f4421:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-10T04:57:13,694 INFO [RS:0;6578523f4421:38527 {}] hbase.ChoreService(370): Chore service for: regionserver/6578523f4421:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-10T04:57:13,694 INFO [RS:0;6578523f4421:38527 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-10T04:57:13,694 INFO [RS:2;6578523f4421:37799 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T04:57:13,694 INFO [regionserver/6578523f4421:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-10T04:57:13,694 INFO [RS:2;6578523f4421:37799 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T04:57:13,694 INFO [RS:0;6578523f4421:38527 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T04:57:13,694 INFO [RS:2;6578523f4421:37799 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T04:57:13,694 INFO [RS:0;6578523f4421:38527 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T04:57:13,694 INFO [regionserver/6578523f4421:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-10T04:57:13,694 INFO [RS:2;6578523f4421:37799 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T04:57:13,694 INFO [RS:0;6578523f4421:38527 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T04:57:13,694 INFO [RS:0;6578523f4421:38527 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38527 2024-12-10T04:57:13,694 INFO [RS:2;6578523f4421:37799 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37799 2024-12-10T04:57:13,698 DEBUG [RS_CLOSE_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/default/TestHBaseWalOnEC/5455a92b09e90219f0f34a7bd78e4528/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-10T04:57:13,701 INFO [RS_CLOSE_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528. 2024-12-10T04:57:13,701 DEBUG [RS_CLOSE_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5455a92b09e90219f0f34a7bd78e4528: Waiting for close lock at 1733806633679Running coprocessor pre-close hooks at 1733806633680 (+1 ms)Disabling compacts and flushes for region at 1733806633680Disabling writes for close at 1733806633681 (+1 ms)Writing region close event to WAL at 1733806633682 (+1 ms)Running coprocessor post-close hooks at 1733806633699 (+17 ms)Closed at 1733806633701 (+2 ms) 2024-12-10T04:57:13,702 DEBUG [RS_CLOSE_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528. 2024-12-10T04:57:13,713 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740/.tmp/info/bf334d6166854408b5a37d7f66c4237a is 153, key is TestHBaseWalOnEC,,1733806631779.5455a92b09e90219f0f34a7bd78e4528./info:regioninfo/1733806632223/Put/seqid=0 2024-12-10T04:57:13,716 WARN [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-10T04:57:13,716 WARN [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:13,719 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2023266466_22 at /127.0.0.1:45490 [Receiving block BP-1791055191-172.17.0.2-1733806623406:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:43693:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45490 dst: /127.0.0.1:43693 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T04:57:13,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43693 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-10T04:57:13,724 WARN [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-10T04:57:13,724 INFO [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740/.tmp/info/bf334d6166854408b5a37d7f66c4237a 2024-12-10T04:57:13,726 INFO [regionserver/6578523f4421:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-10T04:57:13,726 INFO [regionserver/6578523f4421:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-10T04:57:13,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6578523f4421,37799,1733806629213 2024-12-10T04:57:13,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T04:57:13,749 INFO [RS:2;6578523f4421:37799 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T04:57:13,748 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740/.tmp/ns/35949d2574f147369251b728687a9ddd is 43, key is default/ns:d/1733806631552/Put/seqid=0 2024-12-10T04:57:13,751 WARN [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:13,751 WARN [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:13,754 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2023266466_22 at /127.0.0.1:37560 [Receiving block BP-1791055191-172.17.0.2-1733806623406:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37560 dst: /127.0.0.1:45993 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T04:57:13,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38527-0x1000e15a10d0001, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6578523f4421,38527,1733806628988 2024-12-10T04:57:13,758 INFO [RS:0;6578523f4421:38527 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T04:57:13,759 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6578523f4421,37799,1733806629213] 2024-12-10T04:57:13,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-10T04:57:13,760 WARN [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T04:57:13,760 INFO [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740/.tmp/ns/35949d2574f147369251b728687a9ddd 2024-12-10T04:57:13,761 INFO [regionserver/6578523f4421:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T04:57:13,761 INFO [regionserver/6578523f4421:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T04:57:13,761 INFO [regionserver/6578523f4421:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T04:57:13,779 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6578523f4421,37799,1733806629213 already deleted, retry=false 2024-12-10T04:57:13,779 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6578523f4421,37799,1733806629213 expired; onlineServers=2 2024-12-10T04:57:13,779 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6578523f4421,38527,1733806628988] 2024-12-10T04:57:13,785 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740/.tmp/table/7ae452286640466b829606b78ff7fb30 is 52, key is TestHBaseWalOnEC/table:state/1733806632245/Put/seqid=0 2024-12-10T04:57:13,787 WARN [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:13,787 WARN [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:13,789 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6578523f4421,38527,1733806628988 already deleted, retry=false 2024-12-10T04:57:13,789 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6578523f4421,38527,1733806628988 expired; onlineServers=1 2024-12-10T04:57:13,790 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2023266466_22 at /127.0.0.1:47898 [Receiving block BP-1791055191-172.17.0.2-1733806623406:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:44817:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47898 dst: /127.0.0.1:44817 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T04:57:13,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44817 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-10T04:57:13,796 WARN [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-10T04:57:13,796 INFO [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740/.tmp/table/7ae452286640466b829606b78ff7fb30 2024-12-10T04:57:13,806 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740/.tmp/info/bf334d6166854408b5a37d7f66c4237a as hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740/info/bf334d6166854408b5a37d7f66c4237a 2024-12-10T04:57:13,815 INFO [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740/info/bf334d6166854408b5a37d7f66c4237a, entries=10, sequenceid=11, filesize=6.5 K 2024-12-10T04:57:13,816 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740/.tmp/ns/35949d2574f147369251b728687a9ddd as hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740/ns/35949d2574f147369251b728687a9ddd 2024-12-10T04:57:13,826 INFO [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740/ns/35949d2574f147369251b728687a9ddd, entries=2, sequenceid=11, filesize=5.0 K 2024-12-10T04:57:13,828 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740/.tmp/table/7ae452286640466b829606b78ff7fb30 as hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740/table/7ae452286640466b829606b78ff7fb30 2024-12-10T04:57:13,836 INFO [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740/table/7ae452286640466b829606b78ff7fb30, entries=2, sequenceid=11, filesize=5.1 K 2024-12-10T04:57:13,838 INFO [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 157ms, sequenceid=11, compaction requested=false 2024-12-10T04:57:13,838 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-10T04:57:13,846 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, 
maxSeqId=1 2024-12-10T04:57:13,847 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-10T04:57:13,847 INFO [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-10T04:57:13,847 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733806633681Running coprocessor pre-close hooks at 1733806633681Disabling compacts and flushes for region at 1733806633681Disabling writes for close at 1733806633681Obtaining lock to block concurrent updates at 1733806633682 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733806633682Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733806633682Flushing stores of hbase:meta,,1.1588230740 at 1733806633683 (+1 ms)Flushing 1588230740/info: creating writer at 1733806633684 (+1 ms)Flushing 1588230740/info: appending metadata at 1733806633710 (+26 ms)Flushing 1588230740/info: closing flushed file at 1733806633710Flushing 1588230740/ns: creating writer at 1733806633733 (+23 ms)Flushing 1588230740/ns: appending metadata at 1733806633747 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733806633747Flushing 1588230740/table: creating writer at 1733806633768 (+21 ms)Flushing 1588230740/table: appending metadata at 1733806633785 (+17 ms)Flushing 1588230740/table: closing flushed file at 1733806633785Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4834395b: reopening flushed file at 1733806633805 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1784716e: reopening flushed file at 1733806633815 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24711e38: reopening flushed file at 1733806633826 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 157ms, sequenceid=11, compaction requested=false at 1733806633838 (+12 ms)Writing region close event to WAL at 1733806633839 (+1 ms)Running coprocessor post-close hooks at 1733806633847 (+8 ms)Closed at 1733806633847 2024-12-10T04:57:13,848 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-10T04:57:13,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T04:57:13,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37799-0x1000e15a10d0003, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T04:57:13,869 INFO [RS:2;6578523f4421:37799 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T04:57:13,870 INFO [RS:2;6578523f4421:37799 {}] regionserver.HRegionServer(1031): Exiting; stopping=6578523f4421,37799,1733806629213; zookeeper connection closed. 
2024-12-10T04:57:13,870 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7fb1290e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7fb1290e
2024-12-10T04:57:13,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38527-0x1000e15a10d0001, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T04:57:13,879 INFO [RS:0;6578523f4421:38527 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-10T04:57:13,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38527-0x1000e15a10d0001, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T04:57:13,879 INFO [RS:0;6578523f4421:38527 {}] regionserver.HRegionServer(1031): Exiting; stopping=6578523f4421,38527,1733806628988; zookeeper connection closed.
2024-12-10T04:57:13,880 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4598fb4f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4598fb4f
2024-12-10T04:57:13,881 INFO [RS:1;6578523f4421:45887 {}] regionserver.HRegionServer(976): stopping server 6578523f4421,45887,1733806629106; all regions closed.
2024-12-10T04:57:13,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741829_1019 (size=2751)
2024-12-10T04:57:13,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44817 is added to blk_1073741829_1019 (size=2751)
2024-12-10T04:57:13,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43693 is added to blk_1073741829_1019 (size=2751)
2024-12-10T04:57:13,892 DEBUG [RS:1;6578523f4421:45887 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/oldWALs
2024-12-10T04:57:13,892 INFO [RS:1;6578523f4421:45887 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 6578523f4421%2C45887%2C1733806629106.meta:.meta(num 1733806631409)
2024-12-10T04:57:13,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43693 is added to blk_1073741826_1014 (size=1298)
2024-12-10T04:57:13,896 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/WALs/6578523f4421,45887,1733806629106/6578523f4421%2C45887%2C1733806629106.1733806630814 not finished, retry = 0
2024-12-10T04:57:13,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44817 is added to blk_1073741826_1014 (size=1298)
2024-12-10T04:57:13,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741826_1014 (size=1298)
2024-12-10T04:57:14,004 DEBUG [RS:1;6578523f4421:45887 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/oldWALs
2024-12-10T04:57:14,004 INFO [RS:1;6578523f4421:45887 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 6578523f4421%2C45887%2C1733806629106:(num 1733806630814)
2024-12-10T04:57:14,005 DEBUG [RS:1;6578523f4421:45887 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-10T04:57:14,005 INFO [RS:1;6578523f4421:45887 {}] regionserver.LeaseManager(133): Closed leases
2024-12-10T04:57:14,005 INFO [RS:1;6578523f4421:45887 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-10T04:57:14,005 INFO [RS:1;6578523f4421:45887 {}] hbase.ChoreService(370): Chore service for: regionserver/6578523f4421:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-10T04:57:14,006 INFO [RS:1;6578523f4421:45887 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-10T04:57:14,006 INFO [regionserver/6578523f4421:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-10T04:57:14,006 INFO [RS:1;6578523f4421:45887 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45887
2024-12-10T04:57:14,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-10T04:57:14,020 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45887-0x1000e15a10d0002, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6578523f4421,45887,1733806629106
2024-12-10T04:57:14,020 INFO [RS:1;6578523f4421:45887 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-10T04:57:14,032 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6578523f4421,45887,1733806629106]
2024-12-10T04:57:14,042 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6578523f4421,45887,1733806629106 already deleted, retry=false
2024-12-10T04:57:14,042 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6578523f4421,45887,1733806629106 expired; onlineServers=0
2024-12-10T04:57:14,042 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6578523f4421,32843,1733806628164' *****
2024-12-10T04:57:14,042 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-10T04:57:14,043 INFO [M:0;6578523f4421:32843 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-10T04:57:14,043 INFO [M:0;6578523f4421:32843 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-10T04:57:14,043 DEBUG [M:0;6578523f4421:32843 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-10T04:57:14,043 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-10T04:57:14,043 DEBUG [M:0;6578523f4421:32843 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-10T04:57:14,043 DEBUG [master/6578523f4421:0:becomeActiveMaster-HFileCleaner.small.0-1733806630448 {}] cleaner.HFileCleaner(306): Exit Thread[master/6578523f4421:0:becomeActiveMaster-HFileCleaner.small.0-1733806630448,5,FailOnTimeoutGroup] 2024-12-10T04:57:14,043 DEBUG [master/6578523f4421:0:becomeActiveMaster-HFileCleaner.large.0-1733806630448 {}] cleaner.HFileCleaner(306): Exit Thread[master/6578523f4421:0:becomeActiveMaster-HFileCleaner.large.0-1733806630448,5,FailOnTimeoutGroup] 2024-12-10T04:57:14,044 INFO [M:0;6578523f4421:32843 {}] hbase.ChoreService(370): Chore service for: master/6578523f4421:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-10T04:57:14,044 INFO [M:0;6578523f4421:32843 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T04:57:14,044 DEBUG [M:0;6578523f4421:32843 {}] master.HMaster(1795): Stopping service threads 2024-12-10T04:57:14,045 INFO [M:0;6578523f4421:32843 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-10T04:57:14,045 INFO [M:0;6578523f4421:32843 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-10T04:57:14,046 INFO [M:0;6578523f4421:32843 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-10T04:57:14,046 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-10T04:57:14,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-10T04:57:14,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:14,053 DEBUG [M:0;6578523f4421:32843 {}] zookeeper.ZKUtil(347): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-10T04:57:14,053 WARN [M:0;6578523f4421:32843 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-10T04:57:14,054 INFO [M:0;6578523f4421:32843 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/.lastflushedseqids 2024-12-10T04:57:14,062 WARN [M:0;6578523f4421:32843 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:14,062 WARN [M:0;6578523f4421:32843 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-10T04:57:14,064 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-841780559_22 at /127.0.0.1:37568 [Receiving block BP-1791055191-172.17.0.2-1733806623406:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37568 dst: /127.0.0.1:45993 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T04:57:14,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-10T04:57:14,069 WARN [M:0;6578523f4421:32843 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T04:57:14,069 INFO [M:0;6578523f4421:32843 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-10T04:57:14,069 INFO [M:0;6578523f4421:32843 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-10T04:57:14,069 DEBUG [M:0;6578523f4421:32843 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T04:57:14,070 INFO [M:0;6578523f4421:32843 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T04:57:14,070 DEBUG [M:0;6578523f4421:32843 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T04:57:14,070 DEBUG [M:0;6578523f4421:32843 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-10T04:57:14,070 DEBUG [M:0;6578523f4421:32843 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-10T04:57:14,070 INFO [M:0;6578523f4421:32843 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-12-10T04:57:14,091 DEBUG [M:0;6578523f4421:32843 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/af3c614cc86e49618685d05e348b012a is 82, key is hbase:meta,,1/info:regioninfo/1733806631487/Put/seqid=0 2024-12-10T04:57:14,093 WARN [M:0;6578523f4421:32843 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:14,093 WARN [M:0;6578523f4421:32843 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:14,096 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-841780559_22 at /127.0.0.1:37576 [Receiving block BP-1791055191-172.17.0.2-1733806623406:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37576 dst: /127.0.0.1:45993 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T04:57:14,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-10T04:57:14,100 WARN [M:0;6578523f4421:32843 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-10T04:57:14,100 INFO [M:0;6578523f4421:32843 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/af3c614cc86e49618685d05e348b012a 2024-12-10T04:57:14,122 DEBUG [M:0;6578523f4421:32843 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/96c1b881c64e4853a16bdc5acec05a0c is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733806632254/Put/seqid=0 2024-12-10T04:57:14,123 WARN [M:0;6578523f4421:32843 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:14,123 WARN [M:0;6578523f4421:32843 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:14,126 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-841780559_22 at /127.0.0.1:47904 [Receiving block BP-1791055191-172.17.0.2-1733806623406:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:44817:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47904 dst: /127.0.0.1:44817 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T04:57:14,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44817 is added to blk_-9223372036854775552_1037 (size=6439) 2024-12-10T04:57:14,130 WARN [M:0;6578523f4421:32843 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-10T04:57:14,131 INFO [M:0;6578523f4421:32843 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/96c1b881c64e4853a16bdc5acec05a0c 2024-12-10T04:57:14,132 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45887-0x1000e15a10d0002, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T04:57:14,132 INFO [RS:1;6578523f4421:45887 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T04:57:14,132 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45887-0x1000e15a10d0002, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T04:57:14,132 INFO [RS:1;6578523f4421:45887 {}] regionserver.HRegionServer(1031): Exiting; stopping=6578523f4421,45887,1733806629106; zookeeper connection closed. 2024-12-10T04:57:14,132 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@eb1327e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@eb1327e 2024-12-10T04:57:14,133 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-10T04:57:14,153 DEBUG [M:0;6578523f4421:32843 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c8c3b43eb946450796092ceacc404fb4 is 69, key is 6578523f4421,37799,1733806629213/rs:state/1733806630507/Put/seqid=0 2024-12-10T04:57:14,155 WARN [M:0;6578523f4421:32843 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:14,155 WARN [M:0;6578523f4421:32843 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-10T04:57:14,158 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-841780559_22 at /127.0.0.1:37590 [Receiving block BP-1791055191-172.17.0.2-1733806623406:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:45993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37590 dst: /127.0.0.1:45993 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T04:57:14,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-10T04:57:14,162 WARN [M:0;6578523f4421:32843 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-10T04:57:14,162 INFO [M:0;6578523f4421:32843 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c8c3b43eb946450796092ceacc404fb4 2024-12-10T04:57:14,171 DEBUG [M:0;6578523f4421:32843 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/af3c614cc86e49618685d05e348b012a as hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/af3c614cc86e49618685d05e348b012a 2024-12-10T04:57:14,180 INFO [M:0;6578523f4421:32843 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/af3c614cc86e49618685d05e348b012a, entries=8, sequenceid=72, filesize=5.5 K 2024-12-10T04:57:14,181 DEBUG [M:0;6578523f4421:32843 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/96c1b881c64e4853a16bdc5acec05a0c as hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/96c1b881c64e4853a16bdc5acec05a0c 2024-12-10T04:57:14,189 INFO [M:0;6578523f4421:32843 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/96c1b881c64e4853a16bdc5acec05a0c, entries=8, sequenceid=72, filesize=6.3 K 2024-12-10T04:57:14,191 DEBUG [M:0;6578523f4421:32843 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c8c3b43eb946450796092ceacc404fb4 as hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c8c3b43eb946450796092ceacc404fb4 2024-12-10T04:57:14,199 INFO [M:0;6578523f4421:32843 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c8c3b43eb946450796092ceacc404fb4, entries=3, sequenceid=72, filesize=5.2 K 2024-12-10T04:57:14,200 INFO [M:0;6578523f4421:32843 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=72, compaction requested=false 2024-12-10T04:57:14,201 INFO [M:0;6578523f4421:32843 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T04:57:14,201 DEBUG [M:0;6578523f4421:32843 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733806634069Disabling compacts and flushes for region at 1733806634069Disabling writes for close at 1733806634070 (+1 ms)Obtaining lock to block concurrent updates at 1733806634070Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733806634070Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1733806634071 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733806634072 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733806634072Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733806634090 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733806634090Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733806634107 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733806634121 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733806634121Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733806634139 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733806634153 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733806634153Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1bd399a4: reopening flushed file at 1733806634170 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6d9ef71: reopening flushed file at 1733806634180 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c517bf2: reopening flushed file at 1733806634190 (+10 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=72, compaction requested=false at 1733806634200 (+10 ms)Writing region close event to WAL at 1733806634201 (+1 ms)Closed at 1733806634201 2024-12-10T04:57:14,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44817 is added to blk_1073741825_1011 (size=32674) 2024-12-10T04:57:14,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43693 is added to blk_1073741825_1011 (size=32674) 2024-12-10T04:57:14,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45993 is added to blk_1073741825_1011 (size=32674) 2024-12-10T04:57:14,205 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-10T04:57:14,205 INFO [M:0;6578523f4421:32843 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-10T04:57:14,206 INFO [M:0;6578523f4421:32843 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32843 2024-12-10T04:57:14,206 INFO [M:0;6578523f4421:32843 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T04:57:14,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T04:57:14,353 INFO [M:0;6578523f4421:32843 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T04:57:14,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32843-0x1000e15a10d0000, quorum=127.0.0.1:49472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T04:57:14,363 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T04:57:14,366 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T04:57:14,366 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T04:57:14,366 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-10T04:57:14,366 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/hadoop.log.dir/,STOPPED} 2024-12-10T04:57:14,368 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-10T04:57:14,368 WARN [BP-1791055191-172.17.0.2-1733806623406 heartbeating to localhost/127.0.0.1:42739 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-10T04:57:14,368 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-10T04:57:14,368 WARN [BP-1791055191-172.17.0.2-1733806623406 heartbeating to localhost/127.0.0.1:42739 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1791055191-172.17.0.2-1733806623406 (Datanode Uuid 63c63997-802b-4298-97ae-6b2172b1f39f) service to localhost/127.0.0.1:42739 2024-12-10T04:57:14,369 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/cluster_6c5e2b68-39d0-c07f-3b61-774f8639accf/data/data5/current/BP-1791055191-172.17.0.2-1733806623406 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T04:57:14,369 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/cluster_6c5e2b68-39d0-c07f-3b61-774f8639accf/data/data6/current/BP-1791055191-172.17.0.2-1733806623406 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T04:57:14,370 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-10T04:57:14,373 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T04:57:14,374 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T04:57:14,374 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T04:57:14,374 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-10T04:57:14,374 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/hadoop.log.dir/,STOPPED} 2024-12-10T04:57:14,375 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-10T04:57:14,375 WARN [BP-1791055191-172.17.0.2-1733806623406 heartbeating to localhost/127.0.0.1:42739 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-10T04:57:14,375 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-10T04:57:14,375 WARN [BP-1791055191-172.17.0.2-1733806623406 heartbeating to localhost/127.0.0.1:42739 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1791055191-172.17.0.2-1733806623406 (Datanode Uuid b77a1154-46fd-41e2-9a19-30e23aec2da5) service to localhost/127.0.0.1:42739 2024-12-10T04:57:14,376 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/cluster_6c5e2b68-39d0-c07f-3b61-774f8639accf/data/data3/current/BP-1791055191-172.17.0.2-1733806623406 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T04:57:14,376 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/cluster_6c5e2b68-39d0-c07f-3b61-774f8639accf/data/data4/current/BP-1791055191-172.17.0.2-1733806623406 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T04:57:14,377 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-10T04:57:14,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T04:57:14,379 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T04:57:14,379 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T04:57:14,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-10T04:57:14,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/hadoop.log.dir/,STOPPED} 2024-12-10T04:57:14,381 WARN [BP-1791055191-172.17.0.2-1733806623406 heartbeating to localhost/127.0.0.1:42739 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-10T04:57:14,381 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-10T04:57:14,381 WARN [BP-1791055191-172.17.0.2-1733806623406 heartbeating to localhost/127.0.0.1:42739 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1791055191-172.17.0.2-1733806623406 (Datanode Uuid 029eb7d9-cf8c-4f99-a348-85773f3a1a70) service to localhost/127.0.0.1:42739
2024-12-10T04:57:14,381 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-10T04:57:14,381 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/cluster_6c5e2b68-39d0-c07f-3b61-774f8639accf/data/data1/current/BP-1791055191-172.17.0.2-1733806623406 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T04:57:14,382 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/cluster_6c5e2b68-39d0-c07f-3b61-774f8639accf/data/data2/current/BP-1791055191-172.17.0.2-1733806623406 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T04:57:14,382 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-10T04:57:14,389 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-10T04:57:14,389 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T04:57:14,389 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T04:57:14,390 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T04:57:14,390 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/hadoop.log.dir/,STOPPED}
2024-12-10T04:57:14,399 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-10T04:57:14,426 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-10T04:57:14,431 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=86 (was 157), OpenFileDescriptor=443 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=239 (was 255), ProcessCount=11 (was 11), AvailableMemoryMB=5368 (was 5647)
2024-12-10T04:57:14,438 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=86, OpenFileDescriptor=443, MaxFileDescriptor=1048576, SystemLoadAverage=239, ProcessCount=11, AvailableMemoryMB=5368
2024-12-10T04:57:14,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-10T04:57:14,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/hadoop.log.dir so I do NOT create it in target/test-data/dba231f3-a715-59c1-a486-bc826d044acb
2024-12-10T04:57:14,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3193be9d-d2b4-c6c0-b654-5684e1c5d6c0/hadoop.tmp.dir so I do NOT create it in target/test-data/dba231f3-a715-59c1-a486-bc826d044acb
2024-12-10T04:57:14,438 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/cluster_07576dd4-2f1d-984b-8307-16a4a5fff71c, deleteOnExit=true
2024-12-10T04:57:14,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-10T04:57:14,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/test.cache.data in system properties and HBase conf
2024-12-10T04:57:14,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/hadoop.tmp.dir in system properties and HBase conf
2024-12-10T04:57:14,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/hadoop.log.dir in system properties and HBase conf
2024-12-10T04:57:14,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-10T04:57:14,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-10T04:57:14,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-10T04:57:14,439 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem.
Skipping on block location reordering 2024-12-10T04:57:14,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-10T04:57:14,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-10T04:57:14,440 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-10T04:57:14,440 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-10T04:57:14,440 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-10T04:57:14,440 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-10T04:57:14,440 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-10T04:57:14,440 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-10T04:57:14,440 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-10T04:57:14,440 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/nfs.dump.dir in system properties and HBase conf 2024-12-10T04:57:14,440 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/java.io.tmpdir in system properties and HBase conf 2024-12-10T04:57:14,440 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-10T04:57:14,440 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-10T04:57:14,441 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-10T04:57:14,835 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T04:57:14,840 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T04:57:14,841 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T04:57:14,841 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T04:57:14,842 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-10T04:57:14,842 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T04:57:14,843 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a49b909{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/hadoop.log.dir/,AVAILABLE} 2024-12-10T04:57:14,843 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56aa9d3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T04:57:14,933 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7aaeb6cf{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/java.io.tmpdir/jetty-localhost-38925-hadoop-hdfs-3_4_1-tests_jar-_-any-5334723288645547357/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-10T04:57:14,934 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@686c9dd5{HTTP/1.1, (http/1.1)}{localhost:38925} 2024-12-10T04:57:14,934 INFO [Time-limited test {}] server.Server(415): Started @13329ms 2024-12-10T04:57:15,221 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T04:57:15,224 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T04:57:15,224 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T04:57:15,224 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T04:57:15,224 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-10T04:57:15,225 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2807f8c2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/hadoop.log.dir/,AVAILABLE} 2024-12-10T04:57:15,225 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61a92fea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T04:57:15,314 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@38e5384{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/java.io.tmpdir/jetty-localhost-35917-hadoop-hdfs-3_4_1-tests_jar-_-any-3303420121468081733/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T04:57:15,314 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@7d6118e0{HTTP/1.1, (http/1.1)}{localhost:35917} 2024-12-10T04:57:15,314 INFO [Time-limited test {}] server.Server(415): Started @13710ms 2024-12-10T04:57:15,316 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-10T04:57:15,348 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T04:57:15,351 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T04:57:15,351 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T04:57:15,351 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T04:57:15,351 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-10T04:57:15,352 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b4297c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/hadoop.log.dir/,AVAILABLE} 2024-12-10T04:57:15,352 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@bb1336{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T04:57:15,443 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6e5e4927{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/java.io.tmpdir/jetty-localhost-46169-hadoop-hdfs-3_4_1-tests_jar-_-any-9587100110549537609/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T04:57:15,443 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1768a8c1{HTTP/1.1, (http/1.1)}{localhost:46169} 2024-12-10T04:57:15,443 INFO [Time-limited test {}] server.Server(415): Started @13839ms 2024-12-10T04:57:15,445 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-10T04:57:15,470 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T04:57:15,473 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T04:57:15,474 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T04:57:15,474 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T04:57:15,474 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-10T04:57:15,474 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e0095f0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/hadoop.log.dir/,AVAILABLE} 2024-12-10T04:57:15,474 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@38da8210{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T04:57:15,563 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@bff0a43{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/java.io.tmpdir/jetty-localhost-46459-hadoop-hdfs-3_4_1-tests_jar-_-any-7979803018384062943/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T04:57:15,564 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@19dff04d{HTTP/1.1, (http/1.1)}{localhost:46459} 2024-12-10T04:57:15,564 INFO [Time-limited test {}] server.Server(415): Started @13960ms 2024-12-10T04:57:15,565 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-10T04:57:16,630 WARN [Thread-557 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/cluster_07576dd4-2f1d-984b-8307-16a4a5fff71c/data/data1/current/BP-1730488423-172.17.0.2-1733806634465/current, will proceed with Du for space computation calculation, 2024-12-10T04:57:16,630 WARN [Thread-558 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/cluster_07576dd4-2f1d-984b-8307-16a4a5fff71c/data/data2/current/BP-1730488423-172.17.0.2-1733806634465/current, will proceed with Du for space computation calculation, 2024-12-10T04:57:16,649 WARN [Thread-497 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-10T04:57:16,651 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb8c2d893ef7ba6f7 with lease ID 0x60690608ac505d31: Processing first storage report for DS-d56f82b9-3ac8-48ff-86bb-f8fd61b387be from datanode DatanodeRegistration(127.0.0.1:39149, datanodeUuid=f7181527-9d5e-4803-8a1f-0c82a73076b9, infoPort=40633, infoSecurePort=0, ipcPort=35217, storageInfo=lv=-57;cid=testClusterID;nsid=1627416412;c=1733806634465) 2024-12-10T04:57:16,651 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb8c2d893ef7ba6f7 with lease ID 0x60690608ac505d31: from storage DS-d56f82b9-3ac8-48ff-86bb-f8fd61b387be node DatanodeRegistration(127.0.0.1:39149, datanodeUuid=f7181527-9d5e-4803-8a1f-0c82a73076b9, infoPort=40633, infoSecurePort=0, ipcPort=35217, storageInfo=lv=-57;cid=testClusterID;nsid=1627416412;c=1733806634465), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-10T04:57:16,652 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb8c2d893ef7ba6f7 with lease ID 0x60690608ac505d31: Processing first storage report for DS-bf46b53f-cd8c-48a1-ab83-a0fa4963e2f5 from datanode DatanodeRegistration(127.0.0.1:39149, datanodeUuid=f7181527-9d5e-4803-8a1f-0c82a73076b9, infoPort=40633, infoSecurePort=0, ipcPort=35217, storageInfo=lv=-57;cid=testClusterID;nsid=1627416412;c=1733806634465) 2024-12-10T04:57:16,652 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb8c2d893ef7ba6f7 with lease ID 0x60690608ac505d31: from storage DS-bf46b53f-cd8c-48a1-ab83-a0fa4963e2f5 node DatanodeRegistration(127.0.0.1:39149, datanodeUuid=f7181527-9d5e-4803-8a1f-0c82a73076b9, infoPort=40633, infoSecurePort=0, ipcPort=35217, storageInfo=lv=-57;cid=testClusterID;nsid=1627416412;c=1733806634465), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T04:57:16,886 WARN [Thread-568 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/cluster_07576dd4-2f1d-984b-8307-16a4a5fff71c/data/data3/current/BP-1730488423-172.17.0.2-1733806634465/current, will proceed with Du for space computation calculation, 2024-12-10T04:57:16,888 WARN [Thread-569 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/cluster_07576dd4-2f1d-984b-8307-16a4a5fff71c/data/data4/current/BP-1730488423-172.17.0.2-1733806634465/current, will proceed with Du for space computation calculation, 2024-12-10T04:57:16,905 WARN [Thread-520 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-10T04:57:16,908 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdec4dc64c9017681 with lease ID 0x60690608ac505d32: Processing first storage report for DS-71038fe1-4abf-41cc-a2ca-8cd12347c6db from datanode DatanodeRegistration(127.0.0.1:42773, datanodeUuid=477fa2ae-3c0a-4e26-b71f-eedbdb12b430, infoPort=33263, infoSecurePort=0, ipcPort=41243, storageInfo=lv=-57;cid=testClusterID;nsid=1627416412;c=1733806634465) 2024-12-10T04:57:16,908 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdec4dc64c9017681 with lease ID 0x60690608ac505d32: from storage DS-71038fe1-4abf-41cc-a2ca-8cd12347c6db node DatanodeRegistration(127.0.0.1:42773, datanodeUuid=477fa2ae-3c0a-4e26-b71f-eedbdb12b430, infoPort=33263, infoSecurePort=0, ipcPort=41243, storageInfo=lv=-57;cid=testClusterID;nsid=1627416412;c=1733806634465), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T04:57:16,909 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdec4dc64c9017681 with lease ID 0x60690608ac505d32: Processing first storage report for DS-14dc1887-b12c-45f1-9fa7-da0990c1446b from datanode DatanodeRegistration(127.0.0.1:42773, datanodeUuid=477fa2ae-3c0a-4e26-b71f-eedbdb12b430, infoPort=33263, infoSecurePort=0, ipcPort=41243, storageInfo=lv=-57;cid=testClusterID;nsid=1627416412;c=1733806634465) 2024-12-10T04:57:16,909 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdec4dc64c9017681 with lease ID 0x60690608ac505d32: from storage DS-14dc1887-b12c-45f1-9fa7-da0990c1446b node DatanodeRegistration(127.0.0.1:42773, datanodeUuid=477fa2ae-3c0a-4e26-b71f-eedbdb12b430, infoPort=33263, infoSecurePort=0, ipcPort=41243, storageInfo=lv=-57;cid=testClusterID;nsid=1627416412;c=1733806634465), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T04:57:16,953 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-10T04:57:16,997 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-10T04:57:16,998 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-10T04:57:17,007 WARN [Thread-581 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/cluster_07576dd4-2f1d-984b-8307-16a4a5fff71c/data/data6/current/BP-1730488423-172.17.0.2-1733806634465/current, will proceed with Du for space computation calculation, 2024-12-10T04:57:17,007 WARN [Thread-580 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/cluster_07576dd4-2f1d-984b-8307-16a4a5fff71c/data/data5/current/BP-1730488423-172.17.0.2-1733806634465/current, will proceed with Du for space computation calculation, 2024-12-10T04:57:17,023 WARN [Thread-542 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-10T04:57:17,026 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8ab527af4eef65c6 with lease ID 0x60690608ac505d33: Processing first storage report for DS-17c7f519-a637-4f65-9551-ee0e8034a9b2 from datanode DatanodeRegistration(127.0.0.1:41657, datanodeUuid=f6f48f00-035b-4a1c-8b6c-08f1930da7b2, infoPort=43517, infoSecurePort=0, ipcPort=35875, storageInfo=lv=-57;cid=testClusterID;nsid=1627416412;c=1733806634465) 2024-12-10T04:57:17,026 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ab527af4eef65c6 with lease ID 0x60690608ac505d33: from storage DS-17c7f519-a637-4f65-9551-ee0e8034a9b2 node DatanodeRegistration(127.0.0.1:41657, datanodeUuid=f6f48f00-035b-4a1c-8b6c-08f1930da7b2, infoPort=43517, infoSecurePort=0, ipcPort=35875, storageInfo=lv=-57;cid=testClusterID;nsid=1627416412;c=1733806634465), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T04:57:17,026 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8ab527af4eef65c6 with lease ID 0x60690608ac505d33: Processing first storage report for DS-1bac6612-6a2d-497f-b659-c5283402b456 from datanode DatanodeRegistration(127.0.0.1:41657, datanodeUuid=f6f48f00-035b-4a1c-8b6c-08f1930da7b2, infoPort=43517, infoSecurePort=0, ipcPort=35875, storageInfo=lv=-57;cid=testClusterID;nsid=1627416412;c=1733806634465) 2024-12-10T04:57:17,026 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ab527af4eef65c6 with lease ID 0x60690608ac505d33: from storage DS-1bac6612-6a2d-497f-b659-c5283402b456 node DatanodeRegistration(127.0.0.1:41657, datanodeUuid=f6f48f00-035b-4a1c-8b6c-08f1930da7b2, infoPort=43517, infoSecurePort=0, ipcPort=35875, storageInfo=lv=-57;cid=testClusterID;nsid=1627416412;c=1733806634465), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T04:57:17,106 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb 2024-12-10T04:57:17,111 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/cluster_07576dd4-2f1d-984b-8307-16a4a5fff71c/zookeeper_0, clientPort=51167, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/cluster_07576dd4-2f1d-984b-8307-16a4a5fff71c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/cluster_07576dd4-2f1d-984b-8307-16a4a5fff71c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-10T04:57:17,113 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51167 2024-12-10T04:57:17,113 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:17,116 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:17,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741825_1001 (size=7) 2024-12-10T04:57:17,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741825_1001 (size=7) 2024-12-10T04:57:17,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741825_1001 (size=7) 2024-12-10T04:57:17,133 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589 with version=8 2024-12-10T04:57:17,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42739/user/jenkins/test-data/72f1e4ff-c567-97b5-9013-fca4c0405c34/hbase-staging 2024-12-10T04:57:17,136 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6578523f4421:0 server-side Connection retries=45 2024-12-10T04:57:17,136 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:17,136 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:17,136 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T04:57:17,136 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:17,137 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T04:57:17,137 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-10T04:57:17,137 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T04:57:17,138 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39099 2024-12-10T04:57:17,139 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39099 connecting to ZooKeeper ensemble=127.0.0.1:51167 2024-12-10T04:57:17,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:390990x0, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T04:57:17,198 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39099-0x1000e15c6fc0000 connected 2024-12-10T04:57:17,284 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:17,286 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:17,289 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T04:57:17,289 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589, hbase.cluster.distributed=false 2024-12-10T04:57:17,291 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T04:57:17,291 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39099 2024-12-10T04:57:17,292 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39099 2024-12-10T04:57:17,292 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39099 2024-12-10T04:57:17,292 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39099 2024-12-10T04:57:17,292 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39099 2024-12-10T04:57:17,309 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6578523f4421:0 server-side Connection retries=45 2024-12-10T04:57:17,309 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:17,310 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:17,310 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T04:57:17,310 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:17,310 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T04:57:17,310 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T04:57:17,310 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T04:57:17,310 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33029 2024-12-10T04:57:17,312 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33029 connecting to ZooKeeper ensemble=127.0.0.1:51167 2024-12-10T04:57:17,313 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:17,314 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:17,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:330290x0, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T04:57:17,327 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T04:57:17,327 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33029-0x1000e15c6fc0001 connected 2024-12-10T04:57:17,327 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T04:57:17,327 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T04:57:17,328 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T04:57:17,329 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T04:57:17,330 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33029 2024-12-10T04:57:17,330 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33029 2024-12-10T04:57:17,330 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33029 2024-12-10T04:57:17,331 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33029 2024-12-10T04:57:17,331 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33029 2024-12-10T04:57:17,348 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6578523f4421:0 server-side Connection retries=45 2024-12-10T04:57:17,348 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:17,348 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:17,348 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T04:57:17,348 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:17,348 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T04:57:17,348 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T04:57:17,349 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T04:57:17,349 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46693 2024-12-10T04:57:17,350 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46693 connecting to ZooKeeper ensemble=127.0.0.1:51167 2024-12-10T04:57:17,351 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:17,353 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:17,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:466930x0, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T04:57:17,367 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46693-0x1000e15c6fc0002 connected 2024-12-10T04:57:17,367 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T04:57:17,368 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T04:57:17,368 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T04:57:17,369 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T04:57:17,370 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T04:57:17,371 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46693 2024-12-10T04:57:17,371 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46693 2024-12-10T04:57:17,371 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46693 2024-12-10T04:57:17,372 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46693 2024-12-10T04:57:17,372 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46693 2024-12-10T04:57:17,388 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6578523f4421:0 server-side Connection retries=45 2024-12-10T04:57:17,388 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:17,388 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:17,388 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T04:57:17,388 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T04:57:17,388 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T04:57:17,388 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T04:57:17,388 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T04:57:17,389 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45069 2024-12-10T04:57:17,390 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45069 connecting to ZooKeeper ensemble=127.0.0.1:51167 2024-12-10T04:57:17,391 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:17,392 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:17,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:450690x0, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T04:57:17,400 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45069-0x1000e15c6fc0003 connected 2024-12-10T04:57:17,400 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T04:57:17,401 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T04:57:17,401 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T04:57:17,402 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T04:57:17,403 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T04:57:17,403 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45069 2024-12-10T04:57:17,404 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45069 2024-12-10T04:57:17,404 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45069 2024-12-10T04:57:17,404 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45069 2024-12-10T04:57:17,407 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45069 2024-12-10T04:57:17,418 DEBUG [M:0;6578523f4421:39099 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6578523f4421:39099 2024-12-10T04:57:17,419 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6578523f4421,39099,1733806637136 2024-12-10T04:57:17,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T04:57:17,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T04:57:17,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T04:57:17,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T04:57:17,432 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6578523f4421,39099,1733806637136 2024-12-10T04:57:17,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T04:57:17,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T04:57:17,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T04:57:17,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:17,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:17,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:17,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:17,443 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-10T04:57:17,444 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6578523f4421,39099,1733806637136 from backup master directory 2024-12-10T04:57:17,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6578523f4421,39099,1733806637136 2024-12-10T04:57:17,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T04:57:17,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T04:57:17,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T04:57:17,453 WARN [master/6578523f4421:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-10T04:57:17,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T04:57:17,453 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6578523f4421,39099,1733806637136 2024-12-10T04:57:17,462 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/hbase.id] with ID: f91e6b47-d067-4027-a7b7-36c101a8d8e9 2024-12-10T04:57:17,462 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/.tmp/hbase.id 2024-12-10T04:57:17,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741826_1002 (size=42) 2024-12-10T04:57:17,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741826_1002 (size=42) 2024-12-10T04:57:17,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741826_1002 (size=42) 2024-12-10T04:57:17,471 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/.tmp/hbase.id]:[hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/hbase.id] 2024-12-10T04:57:17,486 INFO [master/6578523f4421:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T04:57:17,486 INFO [master/6578523f4421:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-10T04:57:17,488 INFO [master/6578523f4421:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
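
The FSUtils entries above show the cluster ID being written to a .tmp location first and then moved to its final name, the usual write-then-rename idiom for publishing a small file on HDFS so readers never see a half-written copy. Below is a minimal sketch of that idiom using only the plain Hadoop FileSystem API; it is not the FSUtils implementation, the class name and paths are placeholders, and the ID string is just the one reported in this log.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Minimal sketch (not HBase's FSUtils): publish a small file by writing a
    // temporary copy and renaming it into place, as logged above for hbase.id.
    public class WriteThenRename {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // fs.defaultFS should point at the cluster
        Path target = new Path("/user/jenkins/test-data/hbase.id");           // placeholder path
        Path tmp = new Path(target.getParent(), ".tmp/" + target.getName());  // temporary location
        FileSystem fs = target.getFileSystem(conf);

        // 1. Write the content to the temporary location (parents are created as needed).
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("f91e6b47-d067-4027-a7b7-36c101a8d8e9" // cluster ID reported in this log
              .getBytes(StandardCharsets.UTF_8));
        }
        // 2. Move it to the final name; the file appears atomically under its target path.
        if (!fs.rename(tmp, target)) {
          throw new java.io.IOException("rename " + tmp + " -> " + target + " failed");
        }
      }
    }
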
2024-12-10T04:57:17,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:17,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:17,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:17,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:17,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741827_1003 (size=196) 2024-12-10T04:57:17,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741827_1003 (size=196) 2024-12-10T04:57:17,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741827_1003 (size=196) 2024-12-10T04:57:17,506 INFO [master/6578523f4421:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T04:57:17,507 INFO [master/6578523f4421:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-10T04:57:17,510 INFO [master/6578523f4421:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-10T04:57:17,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is 
added to blk_1073741828_1004 (size=1189) 2024-12-10T04:57:17,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741828_1004 (size=1189) 2024-12-10T04:57:17,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741828_1004 (size=1189) 2024-12-10T04:57:17,522 INFO [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/data/master/store 2024-12-10T04:57:17,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741829_1005 (size=34) 2024-12-10T04:57:17,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741829_1005 (size=34) 2024-12-10T04:57:17,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741829_1005 (size=34) 2024-12-10T04:57:17,532 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T04:57:17,532 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T04:57:17,532 INFO [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T04:57:17,532 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-10T04:57:17,532 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-10T04:57:17,532 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T04:57:17,532 INFO [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T04:57:17,532 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733806637532Disabling compacts and flushes for region at 1733806637532Disabling writes for close at 1733806637532Writing region close event to WAL at 1733806637532Closed at 1733806637532 2024-12-10T04:57:17,533 WARN [master/6578523f4421:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/data/master/store/.initializing 2024-12-10T04:57:17,533 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/WALs/6578523f4421,39099,1733806637136 2024-12-10T04:57:17,537 INFO [master/6578523f4421:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6578523f4421%2C39099%2C1733806637136, suffix=, logDir=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/WALs/6578523f4421,39099,1733806637136, archiveDir=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/oldWALs, maxLogs=10 2024-12-10T04:57:17,537 INFO [master/6578523f4421:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6578523f4421%2C39099%2C1733806637136.1733806637537 2024-12-10T04:57:17,547 INFO [master/6578523f4421:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/WALs/6578523f4421,39099,1733806637136/6578523f4421%2C39099%2C1733806637136.1733806637537 2024-12-10T04:57:17,549 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40633:40633),(127.0.0.1/127.0.0.1:43517:43517),(127.0.0.1/127.0.0.1:33263:33263)] 2024-12-10T04:57:17,549 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-10T04:57:17,549 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T04:57:17,549 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:17,549 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:17,551 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:17,553 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-10T04:57:17,553 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:17,554 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T04:57:17,554 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:17,555 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-10T04:57:17,556 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:17,556 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T04:57:17,556 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:17,558 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-10T04:57:17,559 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:17,559 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T04:57:17,559 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:17,561 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-10T04:57:17,561 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:17,562 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T04:57:17,562 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:17,563 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:17,563 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:17,565 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:17,565 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:17,565 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T04:57:17,566 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T04:57:17,569 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T04:57:17,569 INFO [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75468645, jitterRate=0.12457044422626495}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T04:57:17,570 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733806637550Initializing all the Stores at 1733806637551 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733806637551Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733806637551Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733806637551Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733806637551Cleaning up temporary data from old regions at 1733806637565 (+14 ms)Region opened successfully at 1733806637570 (+5 ms) 2024-12-10T04:57:17,571 INFO [master/6578523f4421:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-10T04:57:17,575 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e9ecd2f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6578523f4421/172.17.0.2:0 2024-12-10T04:57:17,576 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-10T04:57:17,576 INFO [master/6578523f4421:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-10T04:57:17,576 INFO [master/6578523f4421:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-10T04:57:17,576 INFO [master/6578523f4421:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-10T04:57:17,577 INFO [master/6578523f4421:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-10T04:57:17,577 INFO [master/6578523f4421:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-10T04:57:17,577 INFO [master/6578523f4421:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-10T04:57:17,580 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-10T04:57:17,582 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-10T04:57:17,589 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-10T04:57:17,590 INFO [master/6578523f4421:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-10T04:57:17,591 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-10T04:57:17,600 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-10T04:57:17,600 INFO [master/6578523f4421:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-10T04:57:17,601 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-10T04:57:17,610 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-10T04:57:17,611 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-10T04:57:17,620 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-10T04:57:17,622 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-10T04:57:17,631 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-10T04:57:17,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T04:57:17,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T04:57:17,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T04:57:17,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-10T04:57:17,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:17,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:17,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:17,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:17,643 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6578523f4421,39099,1733806637136, sessionid=0x1000e15c6fc0000, setting cluster-up flag (Was=false) 2024-12-10T04:57:17,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:17,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:17,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:17,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:17,695 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-10T04:57:17,698 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6578523f4421,39099,1733806637136 2024-12-10T04:57:17,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:17,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:17,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:17,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:17,758 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-10T04:57:17,759 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6578523f4421,39099,1733806637136 2024-12-10T04:57:17,761 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-10T04:57:17,763 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-10T04:57:17,763 INFO [master/6578523f4421:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-10T04:57:17,764 INFO [master/6578523f4421:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-10T04:57:17,764 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6578523f4421,39099,1733806637136 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-10T04:57:17,765 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6578523f4421:0, corePoolSize=5, maxPoolSize=5 2024-12-10T04:57:17,765 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6578523f4421:0, corePoolSize=5, maxPoolSize=5 2024-12-10T04:57:17,765 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6578523f4421:0, corePoolSize=5, maxPoolSize=5 2024-12-10T04:57:17,766 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6578523f4421:0, corePoolSize=5, maxPoolSize=5 2024-12-10T04:57:17,766 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6578523f4421:0, corePoolSize=10, maxPoolSize=10 2024-12-10T04:57:17,766 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,766 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6578523f4421:0, corePoolSize=2, maxPoolSize=2 2024-12-10T04:57:17,766 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,766 INFO [master/6578523f4421:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733806667766 2024-12-10T04:57:17,767 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-10T04:57:17,767 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-10T04:57:17,767 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-10T04:57:17,767 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-10T04:57:17,767 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-10T04:57:17,767 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-10T04:57:17,767 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,767 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-10T04:57:17,767 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-10T04:57:17,768 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-10T04:57:17,768 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-10T04:57:17,768 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T04:57:17,768 INFO [master/6578523f4421:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-10T04:57:17,768 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-10T04:57:17,768 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6578523f4421:0:becomeActiveMaster-HFileCleaner.large.0-1733806637768,5,FailOnTimeoutGroup] 2024-12-10T04:57:17,768 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6578523f4421:0:becomeActiveMaster-HFileCleaner.small.0-1733806637768,5,FailOnTimeoutGroup] 2024-12-10T04:57:17,769 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,769 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-10T04:57:17,769 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,769 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-10T04:57:17,769 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:17,769 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-10T04:57:17,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741831_1007 (size=1321) 2024-12-10T04:57:17,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741831_1007 (size=1321) 2024-12-10T04:57:17,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741831_1007 (size=1321) 2024-12-10T04:57:17,782 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-10T04:57:17,783 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589 2024-12-10T04:57:17,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741832_1008 (size=32) 2024-12-10T04:57:17,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741832_1008 (size=32) 2024-12-10T04:57:17,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741832_1008 (size=32) 2024-12-10T04:57:17,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T04:57:17,798 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T04:57:17,799 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T04:57:17,800 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:17,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T04:57:17,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-10T04:57:17,802 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-10T04:57:17,802 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:17,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T04:57:17,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T04:57:17,804 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T04:57:17,804 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:17,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T04:57:17,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T04:57:17,806 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T04:57:17,807 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:17,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T04:57:17,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-10T04:57:17,808 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740 2024-12-10T04:57:17,809 INFO [RS:2;6578523f4421:45069 {}] regionserver.HRegionServer(746): ClusterId : f91e6b47-d067-4027-a7b7-36c101a8d8e9 2024-12-10T04:57:17,809 DEBUG [RS:2;6578523f4421:45069 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T04:57:17,809 INFO [RS:1;6578523f4421:46693 {}] regionserver.HRegionServer(746): ClusterId : f91e6b47-d067-4027-a7b7-36c101a8d8e9 2024-12-10T04:57:17,809 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740 2024-12-10T04:57:17,809 DEBUG [RS:1;6578523f4421:46693 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T04:57:17,811 INFO [RS:0;6578523f4421:33029 {}] regionserver.HRegionServer(746): ClusterId : f91e6b47-d067-4027-a7b7-36c101a8d8e9 2024-12-10T04:57:17,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-10T04:57:17,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-10T04:57:17,811 DEBUG [RS:0;6578523f4421:33029 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T04:57:17,812 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-10T04:57:17,814 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-10T04:57:17,816 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T04:57:17,817 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70972591, jitterRate=0.05757401883602142}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T04:57:17,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733806637797Initializing all the Stores at 1733806637797Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733806637798 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733806637798Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733806637798Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733806637798Cleaning up temporary data from old regions at 1733806637811 (+13 ms)Region opened successfully at 1733806637818 (+7 ms) 2024-12-10T04:57:17,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-10T04:57:17,818 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-10T04:57:17,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-10T04:57:17,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T04:57:17,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T04:57:17,819 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-10T04:57:17,819 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733806637818Disabling compacts and flushes for region at 1733806637818Disabling writes for close at 1733806637818Writing region 
close event to WAL at 1733806637819 (+1 ms)Closed at 1733806637819 2024-12-10T04:57:17,821 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T04:57:17,821 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-10T04:57:17,821 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-10T04:57:17,823 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-10T04:57:17,825 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-10T04:57:17,832 DEBUG [RS:1;6578523f4421:46693 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T04:57:17,832 DEBUG [RS:2;6578523f4421:45069 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T04:57:17,832 DEBUG [RS:1;6578523f4421:46693 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T04:57:17,832 DEBUG [RS:2;6578523f4421:45069 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T04:57:17,832 DEBUG [RS:0;6578523f4421:33029 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T04:57:17,832 DEBUG [RS:0;6578523f4421:33029 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T04:57:17,853 DEBUG [RS:2;6578523f4421:45069 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T04:57:17,853 DEBUG [RS:1;6578523f4421:46693 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T04:57:17,853 DEBUG [RS:0;6578523f4421:33029 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T04:57:17,853 DEBUG [RS:0;6578523f4421:33029 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b362bdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6578523f4421/172.17.0.2:0 2024-12-10T04:57:17,853 DEBUG [RS:2;6578523f4421:45069 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b8d0d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6578523f4421/172.17.0.2:0 2024-12-10T04:57:17,854 DEBUG [RS:1;6578523f4421:46693 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6125569d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=6578523f4421/172.17.0.2:0 2024-12-10T04:57:17,864 DEBUG [RS:0;6578523f4421:33029 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6578523f4421:33029 2024-12-10T04:57:17,865 INFO [RS:0;6578523f4421:33029 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T04:57:17,865 INFO [RS:0;6578523f4421:33029 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T04:57:17,865 DEBUG [RS:0;6578523f4421:33029 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-10T04:57:17,865 INFO [RS:0;6578523f4421:33029 {}] regionserver.HRegionServer(2659): reportForDuty to master=6578523f4421,39099,1733806637136 with port=33029, startcode=1733806637309 2024-12-10T04:57:17,866 DEBUG [RS:0;6578523f4421:33029 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T04:57:17,867 DEBUG [RS:2;6578523f4421:45069 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;6578523f4421:45069 2024-12-10T04:57:17,867 INFO [RS:2;6578523f4421:45069 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T04:57:17,867 INFO [RS:2;6578523f4421:45069 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T04:57:17,867 DEBUG [RS:2;6578523f4421:45069 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-10T04:57:17,868 INFO [RS:2;6578523f4421:45069 {}] regionserver.HRegionServer(2659): reportForDuty to master=6578523f4421,39099,1733806637136 with port=45069, startcode=1733806637387 2024-12-10T04:57:17,868 DEBUG [RS:2;6578523f4421:45069 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T04:57:17,872 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55491, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T04:57:17,873 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60233, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T04:57:17,873 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39099 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6578523f4421,33029,1733806637309 2024-12-10T04:57:17,873 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39099 {}] master.ServerManager(517): Registering regionserver=6578523f4421,33029,1733806637309 2024-12-10T04:57:17,876 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39099 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6578523f4421,45069,1733806637387 2024-12-10T04:57:17,876 DEBUG [RS:0;6578523f4421:33029 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589 2024-12-10T04:57:17,876 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39099 {}] master.ServerManager(517): Registering regionserver=6578523f4421,45069,1733806637387 2024-12-10T04:57:17,876 DEBUG [RS:0;6578523f4421:33029 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35491 2024-12-10T04:57:17,876 DEBUG [RS:0;6578523f4421:33029 {}] 
regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T04:57:17,876 DEBUG [RS:1;6578523f4421:46693 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;6578523f4421:46693 2024-12-10T04:57:17,876 INFO [RS:1;6578523f4421:46693 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-10T04:57:17,876 INFO [RS:1;6578523f4421:46693 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-10T04:57:17,876 DEBUG [RS:1;6578523f4421:46693 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-10T04:57:17,878 DEBUG [RS:2;6578523f4421:45069 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589 2024-12-10T04:57:17,878 DEBUG [RS:2;6578523f4421:45069 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35491 2024-12-10T04:57:17,878 DEBUG [RS:2;6578523f4421:45069 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T04:57:17,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T04:57:17,884 INFO [RS:1;6578523f4421:46693 {}] regionserver.HRegionServer(2659): reportForDuty to master=6578523f4421,39099,1733806637136 with port=46693, startcode=1733806637347 2024-12-10T04:57:17,884 DEBUG [RS:1;6578523f4421:46693 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T04:57:17,886 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59089, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T04:57:17,887 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39099 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6578523f4421,46693,1733806637347 2024-12-10T04:57:17,887 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39099 {}] master.ServerManager(517): Registering regionserver=6578523f4421,46693,1733806637347 2024-12-10T04:57:17,889 DEBUG [RS:1;6578523f4421:46693 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589 2024-12-10T04:57:17,889 DEBUG [RS:1;6578523f4421:46693 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35491 2024-12-10T04:57:17,889 DEBUG [RS:1;6578523f4421:46693 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-10T04:57:17,926 DEBUG [RS:0;6578523f4421:33029 {}] zookeeper.ZKUtil(111): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6578523f4421,33029,1733806637309 2024-12-10T04:57:17,926 WARN [RS:0;6578523f4421:33029 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
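The three region servers reporting for duty above (ports 33029, 46693 and 45069) and the master handing them hbase.rootdir and fs.defaultFS are the signature of a single-host mini cluster. A minimal sketch of how such a cluster is typically started in an HBase test, assuming the 2.x-era HBaseTestingUtility / StartMiniClusterOption API from the HBase test framework (newer branches expose the same idea under renamed classes); the wrapper class here is made up for illustration:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class ThreeRegionServerClusterSketch {
  public static void main(String[] args) throws Exception {
    // Spins up an in-process HMaster, three region servers, a ZooKeeper
    // quorum and a mini DFS, mirroring the RS:0 / RS:1 / RS:2 threads above.
    HBaseTestingUtility util = new HBaseTestingUtility();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .build();
    util.startMiniCluster(option);
    try {
      // hbase.rootdir and fs.defaultFS are handed to each region server at
      // registration time, as the "Config from master" lines show.
      System.out.println(util.getConfiguration().get("hbase.rootdir"));
    } finally {
      util.shutdownMiniCluster();
    }
  }
}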
2024-12-10T04:57:17,926 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6578523f4421,45069,1733806637387] 2024-12-10T04:57:17,926 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6578523f4421,33029,1733806637309] 2024-12-10T04:57:17,926 INFO [RS:0;6578523f4421:33029 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-10T04:57:17,926 DEBUG [RS:0;6578523f4421:33029 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/WALs/6578523f4421,33029,1733806637309 2024-12-10T04:57:17,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T04:57:17,947 DEBUG [RS:2;6578523f4421:45069 {}] zookeeper.ZKUtil(111): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6578523f4421,45069,1733806637387 2024-12-10T04:57:17,947 WARN [RS:2;6578523f4421:45069 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T04:57:17,947 INFO [RS:2;6578523f4421:45069 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-10T04:57:17,947 DEBUG [RS:2;6578523f4421:45069 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/WALs/6578523f4421,45069,1733806637387 2024-12-10T04:57:17,948 DEBUG [RS:1;6578523f4421:46693 {}] zookeeper.ZKUtil(111): regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6578523f4421,46693,1733806637347 2024-12-10T04:57:17,948 WARN [RS:1;6578523f4421:46693 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T04:57:17,948 INFO [RS:1;6578523f4421:46693 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-10T04:57:17,948 DEBUG [RS:1;6578523f4421:46693 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/WALs/6578523f4421,46693,1733806637347 2024-12-10T04:57:17,948 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6578523f4421,46693,1733806637347] 2024-12-10T04:57:17,948 INFO [RS:0;6578523f4421:33029 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T04:57:17,950 INFO [RS:0;6578523f4421:33029 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T04:57:17,951 INFO [RS:0;6578523f4421:33029 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T04:57:17,951 INFO [RS:0;6578523f4421:33029 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
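Each region server publishes itself as an ephemeral znode under /hbase/rs, which is what RegionServerTracker reacts to above when it logs "RegionServer ephemeral node created". One way to see those znodes directly is a plain ZooKeeper client pointed at the quorum from the log (127.0.0.1:51167); this is a hedged side sketch, not something the test itself does:

import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ListRegionServerZNodes {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Quorum address taken from the log lines above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51167", 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    // Ephemeral children such as 6578523f4421,33029,1733806637309 disappear
    // automatically when the owning region server's session expires.
    List<String> servers = zk.getChildren("/hbase/rs", false);
    servers.forEach(System.out::println);
    zk.close();
  }
}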
2024-12-10T04:57:17,951 INFO [RS:0;6578523f4421:33029 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T04:57:17,952 INFO [RS:2;6578523f4421:45069 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T04:57:17,952 INFO [RS:1;6578523f4421:46693 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T04:57:17,953 INFO [RS:0;6578523f4421:33029 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T04:57:17,953 INFO [RS:0;6578523f4421:33029 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,953 DEBUG [RS:0;6578523f4421:33029 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,953 DEBUG [RS:0;6578523f4421:33029 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,953 DEBUG [RS:0;6578523f4421:33029 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,953 DEBUG [RS:0;6578523f4421:33029 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,953 DEBUG [RS:0;6578523f4421:33029 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,953 DEBUG [RS:0;6578523f4421:33029 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6578523f4421:0, corePoolSize=2, maxPoolSize=2 2024-12-10T04:57:17,954 DEBUG [RS:0;6578523f4421:33029 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,954 DEBUG [RS:0;6578523f4421:33029 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,954 DEBUG [RS:0;6578523f4421:33029 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,954 DEBUG [RS:0;6578523f4421:33029 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,954 DEBUG [RS:0;6578523f4421:33029 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,954 DEBUG [RS:0;6578523f4421:33029 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,954 DEBUG [RS:0;6578523f4421:33029 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6578523f4421:0, corePoolSize=3, maxPoolSize=3 2024-12-10T04:57:17,954 DEBUG [RS:0;6578523f4421:33029 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0, 
corePoolSize=3, maxPoolSize=3 2024-12-10T04:57:17,957 INFO [RS:2;6578523f4421:45069 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T04:57:17,957 INFO [RS:1;6578523f4421:46693 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T04:57:17,957 INFO [RS:2;6578523f4421:45069 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T04:57:17,957 INFO [RS:2;6578523f4421:45069 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,961 INFO [RS:0;6578523f4421:33029 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,961 INFO [RS:0;6578523f4421:33029 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,961 INFO [RS:0;6578523f4421:33029 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,961 INFO [RS:0;6578523f4421:33029 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,961 INFO [RS:0;6578523f4421:33029 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,961 INFO [RS:0;6578523f4421:33029 {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,33029,1733806637309-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T04:57:17,961 INFO [RS:2;6578523f4421:45069 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T04:57:17,961 INFO [RS:1;6578523f4421:46693 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T04:57:17,961 INFO [RS:1;6578523f4421:46693 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,962 INFO [RS:2;6578523f4421:45069 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T04:57:17,962 INFO [RS:2;6578523f4421:45069 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-10T04:57:17,962 DEBUG [RS:2;6578523f4421:45069 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,963 DEBUG [RS:2;6578523f4421:45069 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,963 DEBUG [RS:2;6578523f4421:45069 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,963 DEBUG [RS:2;6578523f4421:45069 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,963 DEBUG [RS:2;6578523f4421:45069 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,963 DEBUG [RS:2;6578523f4421:45069 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6578523f4421:0, corePoolSize=2, maxPoolSize=2 2024-12-10T04:57:17,963 DEBUG [RS:2;6578523f4421:45069 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,963 DEBUG [RS:2;6578523f4421:45069 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,963 DEBUG [RS:2;6578523f4421:45069 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,963 DEBUG [RS:2;6578523f4421:45069 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,963 DEBUG [RS:2;6578523f4421:45069 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,963 DEBUG [RS:2;6578523f4421:45069 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,963 DEBUG [RS:2;6578523f4421:45069 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6578523f4421:0, corePoolSize=3, maxPoolSize=3 2024-12-10T04:57:17,963 DEBUG [RS:2;6578523f4421:45069 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0, corePoolSize=3, maxPoolSize=3 2024-12-10T04:57:17,965 INFO [RS:1;6578523f4421:46693 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-10T04:57:17,966 INFO [RS:1;6578523f4421:46693 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-10T04:57:17,966 INFO [RS:2;6578523f4421:45069 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,966 INFO [RS:2;6578523f4421:45069 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-12-10T04:57:17,966 INFO [RS:1;6578523f4421:46693 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,966 INFO [RS:2;6578523f4421:45069 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,966 INFO [RS:2;6578523f4421:45069 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,966 INFO [RS:2;6578523f4421:45069 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,966 DEBUG [RS:1;6578523f4421:46693 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,966 INFO [RS:2;6578523f4421:45069 {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,45069,1733806637387-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T04:57:17,966 DEBUG [RS:1;6578523f4421:46693 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,966 DEBUG [RS:1;6578523f4421:46693 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,966 DEBUG [RS:1;6578523f4421:46693 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,966 DEBUG [RS:1;6578523f4421:46693 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,966 DEBUG [RS:1;6578523f4421:46693 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6578523f4421:0, corePoolSize=2, maxPoolSize=2 2024-12-10T04:57:17,966 DEBUG [RS:1;6578523f4421:46693 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,967 DEBUG [RS:1;6578523f4421:46693 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,967 DEBUG [RS:1;6578523f4421:46693 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,967 DEBUG [RS:1;6578523f4421:46693 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,967 DEBUG [RS:1;6578523f4421:46693 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,967 DEBUG [RS:1;6578523f4421:46693 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6578523f4421:0, corePoolSize=1, maxPoolSize=1 2024-12-10T04:57:17,967 DEBUG [RS:1;6578523f4421:46693 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6578523f4421:0, corePoolSize=3, maxPoolSize=3 2024-12-10T04:57:17,967 DEBUG 
[RS:1;6578523f4421:46693 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0, corePoolSize=3, maxPoolSize=3 2024-12-10T04:57:17,969 INFO [RS:1;6578523f4421:46693 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,969 INFO [RS:1;6578523f4421:46693 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,969 INFO [RS:1;6578523f4421:46693 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,969 INFO [RS:1;6578523f4421:46693 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,969 INFO [RS:1;6578523f4421:46693 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,969 INFO [RS:1;6578523f4421:46693 {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,46693,1733806637347-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T04:57:17,974 INFO [RS:0;6578523f4421:33029 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T04:57:17,975 INFO [RS:0;6578523f4421:33029 {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,33029,1733806637309-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,975 INFO [RS:0;6578523f4421:33029 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,975 INFO [RS:0;6578523f4421:33029 {}] regionserver.Replication(171): 6578523f4421,33029,1733806637309 started 2024-12-10T04:57:17,975 WARN [6578523f4421:39099 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-10T04:57:17,981 INFO [RS:2;6578523f4421:45069 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T04:57:17,981 INFO [RS:2;6578523f4421:45069 {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,45069,1733806637387-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,981 INFO [RS:2;6578523f4421:45069 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,981 INFO [RS:2;6578523f4421:45069 {}] regionserver.Replication(171): 6578523f4421,45069,1733806637387 started 2024-12-10T04:57:17,987 INFO [RS:0;6578523f4421:33029 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-10T04:57:17,987 INFO [RS:0;6578523f4421:33029 {}] regionserver.HRegionServer(1482): Serving as 6578523f4421,33029,1733806637309, RpcServer on 6578523f4421/172.17.0.2:33029, sessionid=0x1000e15c6fc0001 2024-12-10T04:57:17,987 DEBUG [RS:0;6578523f4421:33029 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T04:57:17,987 DEBUG [RS:0;6578523f4421:33029 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6578523f4421,33029,1733806637309 2024-12-10T04:57:17,987 DEBUG [RS:0;6578523f4421:33029 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6578523f4421,33029,1733806637309' 2024-12-10T04:57:17,987 DEBUG [RS:0;6578523f4421:33029 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T04:57:17,988 INFO [RS:1;6578523f4421:46693 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T04:57:17,988 DEBUG [RS:0;6578523f4421:33029 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T04:57:17,988 INFO [RS:1;6578523f4421:46693 {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,46693,1733806637347-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,988 INFO [RS:1;6578523f4421:46693 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:17,988 INFO [RS:1;6578523f4421:46693 {}] regionserver.Replication(171): 6578523f4421,46693,1733806637347 started 2024-12-10T04:57:17,988 DEBUG [RS:0;6578523f4421:33029 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T04:57:17,988 DEBUG [RS:0;6578523f4421:33029 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T04:57:17,988 DEBUG [RS:0;6578523f4421:33029 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6578523f4421,33029,1733806637309 2024-12-10T04:57:17,988 DEBUG [RS:0;6578523f4421:33029 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6578523f4421,33029,1733806637309' 2024-12-10T04:57:17,988 DEBUG [RS:0;6578523f4421:33029 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T04:57:17,988 DEBUG [RS:0;6578523f4421:33029 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T04:57:17,989 DEBUG [RS:0;6578523f4421:33029 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T04:57:17,989 INFO [RS:0;6578523f4421:33029 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T04:57:17,989 INFO [RS:0;6578523f4421:33029 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T04:57:17,994 INFO [RS:2;6578523f4421:45069 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-10T04:57:17,994 INFO [RS:2;6578523f4421:45069 {}] regionserver.HRegionServer(1482): Serving as 6578523f4421,45069,1733806637387, RpcServer on 6578523f4421/172.17.0.2:45069, sessionid=0x1000e15c6fc0003 2024-12-10T04:57:17,994 DEBUG [RS:2;6578523f4421:45069 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T04:57:17,995 DEBUG [RS:2;6578523f4421:45069 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6578523f4421,45069,1733806637387 2024-12-10T04:57:17,995 DEBUG [RS:2;6578523f4421:45069 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6578523f4421,45069,1733806637387' 2024-12-10T04:57:17,995 DEBUG [RS:2;6578523f4421:45069 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T04:57:17,995 DEBUG [RS:2;6578523f4421:45069 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T04:57:17,996 DEBUG [RS:2;6578523f4421:45069 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T04:57:17,996 DEBUG [RS:2;6578523f4421:45069 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T04:57:17,996 DEBUG [RS:2;6578523f4421:45069 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6578523f4421,45069,1733806637387 2024-12-10T04:57:17,996 DEBUG [RS:2;6578523f4421:45069 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6578523f4421,45069,1733806637387' 2024-12-10T04:57:17,996 DEBUG [RS:2;6578523f4421:45069 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T04:57:17,996 DEBUG [RS:2;6578523f4421:45069 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T04:57:17,997 DEBUG [RS:2;6578523f4421:45069 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T04:57:17,997 INFO [RS:2;6578523f4421:45069 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T04:57:17,997 INFO [RS:2;6578523f4421:45069 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-10T04:57:18,000 INFO [RS:1;6578523f4421:46693 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-10T04:57:18,000 INFO [RS:1;6578523f4421:46693 {}] regionserver.HRegionServer(1482): Serving as 6578523f4421,46693,1733806637347, RpcServer on 6578523f4421/172.17.0.2:46693, sessionid=0x1000e15c6fc0002 2024-12-10T04:57:18,000 DEBUG [RS:1;6578523f4421:46693 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T04:57:18,000 DEBUG [RS:1;6578523f4421:46693 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6578523f4421,46693,1733806637347 2024-12-10T04:57:18,000 DEBUG [RS:1;6578523f4421:46693 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6578523f4421,46693,1733806637347' 2024-12-10T04:57:18,000 DEBUG [RS:1;6578523f4421:46693 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T04:57:18,001 DEBUG [RS:1;6578523f4421:46693 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T04:57:18,001 DEBUG [RS:1;6578523f4421:46693 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T04:57:18,001 DEBUG [RS:1;6578523f4421:46693 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T04:57:18,001 DEBUG [RS:1;6578523f4421:46693 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6578523f4421,46693,1733806637347 2024-12-10T04:57:18,001 DEBUG [RS:1;6578523f4421:46693 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6578523f4421,46693,1733806637347' 2024-12-10T04:57:18,001 DEBUG [RS:1;6578523f4421:46693 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T04:57:18,002 DEBUG [RS:1;6578523f4421:46693 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T04:57:18,002 DEBUG [RS:1;6578523f4421:46693 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T04:57:18,002 INFO [RS:1;6578523f4421:46693 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T04:57:18,002 INFO [RS:1;6578523f4421:46693 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-10T04:57:18,094 INFO [RS:0;6578523f4421:33029 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6578523f4421%2C33029%2C1733806637309, suffix=, logDir=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/WALs/6578523f4421,33029,1733806637309, archiveDir=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/oldWALs, maxLogs=32 2024-12-10T04:57:18,097 INFO [RS:0;6578523f4421:33029 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6578523f4421%2C33029%2C1733806637309.1733806638096 2024-12-10T04:57:18,099 INFO [RS:2;6578523f4421:45069 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6578523f4421%2C45069%2C1733806637387, suffix=, logDir=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/WALs/6578523f4421,45069,1733806637387, archiveDir=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/oldWALs, maxLogs=32 2024-12-10T04:57:18,100 INFO [RS:2;6578523f4421:45069 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6578523f4421%2C45069%2C1733806637387.1733806638099 2024-12-10T04:57:18,104 INFO [RS:1;6578523f4421:46693 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6578523f4421%2C46693%2C1733806637347, suffix=, logDir=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/WALs/6578523f4421,46693,1733806637347, archiveDir=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/oldWALs, maxLogs=32 2024-12-10T04:57:18,104 INFO [RS:0;6578523f4421:33029 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/WALs/6578523f4421,33029,1733806637309/6578523f4421%2C33029%2C1733806637309.1733806638096 2024-12-10T04:57:18,105 INFO [RS:1;6578523f4421:46693 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6578523f4421%2C46693%2C1733806637347.1733806638105 2024-12-10T04:57:18,108 INFO [RS:2;6578523f4421:45069 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/WALs/6578523f4421,45069,1733806637387/6578523f4421%2C45069%2C1733806637387.1733806638099 2024-12-10T04:57:18,109 DEBUG [RS:0;6578523f4421:33029 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33263:33263),(127.0.0.1/127.0.0.1:43517:43517),(127.0.0.1/127.0.0.1:40633:40633)] 2024-12-10T04:57:18,110 DEBUG [RS:2;6578523f4421:45069 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33263:33263),(127.0.0.1/127.0.0.1:43517:43517),(127.0.0.1/127.0.0.1:40633:40633)] 2024-12-10T04:57:18,116 INFO [RS:1;6578523f4421:46693 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/WALs/6578523f4421,46693,1733806637347/6578523f4421%2C46693%2C1733806637347.1733806638105 2024-12-10T04:57:18,120 DEBUG [RS:1;6578523f4421:46693 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40633:40633),(127.0.0.1/127.0.0.1:43517:43517),(127.0.0.1/127.0.0.1:33263:33263)] 2024-12-10T04:57:18,226 DEBUG [6578523f4421:39099 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-10T04:57:18,226 DEBUG [6578523f4421:39099 {}] balancer.BalancerClusterState(204): Hosts are {6578523f4421=0} racks are {/default-rack=0} 2024-12-10T04:57:18,230 DEBUG [6578523f4421:39099 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-10T04:57:18,230 DEBUG [6578523f4421:39099 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-10T04:57:18,230 DEBUG [6578523f4421:39099 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-10T04:57:18,230 DEBUG [6578523f4421:39099 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-10T04:57:18,230 DEBUG [6578523f4421:39099 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-10T04:57:18,230 DEBUG [6578523f4421:39099 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-10T04:57:18,230 INFO [6578523f4421:39099 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-10T04:57:18,230 INFO [6578523f4421:39099 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-10T04:57:18,230 INFO [6578523f4421:39099 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-10T04:57:18,230 DEBUG [6578523f4421:39099 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T04:57:18,230 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6578523f4421,33029,1733806637309 2024-12-10T04:57:18,232 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6578523f4421,33029,1733806637309, state=OPENING 2024-12-10T04:57:18,242 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-10T04:57:18,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:18,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:18,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:18,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:18,254 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-10T04:57:18,254 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T04:57:18,254 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T04:57:18,254 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6578523f4421,33029,1733806637309}] 2024-12-10T04:57:18,254 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T04:57:18,254 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T04:57:18,413 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T04:57:18,418 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33829, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T04:57:18,426 INFO [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-10T04:57:18,426 INFO [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-10T04:57:18,429 INFO [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6578523f4421%2C33029%2C1733806637309.meta, suffix=.meta, logDir=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/WALs/6578523f4421,33029,1733806637309, archiveDir=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/oldWALs, maxLogs=32 2024-12-10T04:57:18,431 INFO [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6578523f4421%2C33029%2C1733806637309.meta.1733806638430.meta 2024-12-10T04:57:18,438 INFO [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/WALs/6578523f4421,33029,1733806637309/6578523f4421%2C33029%2C1733806637309.meta.1733806638430.meta 2024-12-10T04:57:18,439 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40633:40633),(127.0.0.1/127.0.0.1:33263:33263),(127.0.0.1/127.0.0.1:43517:43517)] 2024-12-10T04:57:18,440 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-10T04:57:18,440 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-10T04:57:18,441 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-10T04:57:18,441 INFO [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-10T04:57:18,441 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-10T04:57:18,441 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T04:57:18,441 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-10T04:57:18,441 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-10T04:57:18,443 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T04:57:18,444 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T04:57:18,444 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:18,444 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T04:57:18,445 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-10T04:57:18,445 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-10T04:57:18,446 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:18,446 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T04:57:18,446 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T04:57:18,447 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T04:57:18,447 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:18,448 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T04:57:18,448 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T04:57:18,449 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T04:57:18,449 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:18,449 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
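The hbase:meta stores opened above (info, ns, rep_barrier, table) are all created with ROW_INDEX_V1 block encoding, ROWCOL bloom filters, in-memory caching and no compression. For a user table, the equivalent column family settings would be declared through the public descriptor builders; a minimal sketch assuming the standard HBase 2.x client API, with a hypothetical table and family name:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class RowIndexFamilySketch {
  public static void main(String[] args) {
    // Mirrors the store settings logged for hbase:meta; "example"/"info"
    // are placeholder names for illustration only.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setCompressionType(Compression.Algorithm.NONE)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .build();
    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example"))
        .setColumnFamily(info)
        .build();
    System.out.println(table);
  }
}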
2024-12-10T04:57:18,449 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-10T04:57:18,450 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740 2024-12-10T04:57:18,452 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740 2024-12-10T04:57:18,454 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-10T04:57:18,454 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-10T04:57:18,454 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T04:57:18,456 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-10T04:57:18,458 INFO [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69698333, jitterRate=0.0385860949754715}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T04:57:18,458 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-10T04:57:18,459 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733806638441Writing region info on filesystem at 1733806638441Initializing all the Stores at 1733806638442 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733806638442Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733806638443 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733806638443Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733806638443Cleaning up temporary data from old regions at 1733806638454 (+11 ms)Running coprocessor post-open hooks at 1733806638458 (+4 ms)Region opened successfully at 1733806638459 (+1 ms) 2024-12-10T04:57:18,461 INFO [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733806638412 2024-12-10T04:57:18,464 DEBUG [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-10T04:57:18,464 INFO [RS_OPEN_META-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-10T04:57:18,465 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6578523f4421,33029,1733806637309 2024-12-10T04:57:18,467 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6578523f4421,33029,1733806637309, state=OPEN 2024-12-10T04:57:18,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T04:57:18,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T04:57:18,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T04:57:18,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T04:57:18,473 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6578523f4421,33029,1733806637309 2024-12-10T04:57:18,474 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T04:57:18,474 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T04:57:18,474 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T04:57:18,474 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T04:57:18,478 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-10T04:57:18,479 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6578523f4421,33029,1733806637309 in 220 msec 2024-12-10T04:57:18,482 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-10T04:57:18,482 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 657 msec 2024-12-10T04:57:18,483 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-10T04:57:18,483 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-10T04:57:18,485 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-10T04:57:18,485 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6578523f4421,33029,1733806637309, seqNum=-1] 2024-12-10T04:57:18,486 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T04:57:18,487 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47007, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T04:57:18,495 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 731 msec 2024-12-10T04:57:18,495 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733806638495, completionTime=-1 2024-12-10T04:57:18,495 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-10T04:57:18,495 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
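The PEWorker above fetches the hbase:meta location from the connection registry exactly the way an external client would. A minimal sketch of that lookup using the public client API, assuming a configuration pointed at this cluster's ZooKeeper quorum (values copied from the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Quorum settings taken from the log; adjust for a real deployment.
    conf.set(HConstants.ZOOKEEPER_QUORUM, "127.0.0.1");
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 51167);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Resolves hbase:meta,,1.1588230740 to its hosting region server,
      // e.g. 6578523f4421,33029,1733806637309 in the run above.
      HRegionLocation meta = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println(meta.getServerName() + " hosts " + meta.getRegion().getRegionNameAsString());
    }
  }
}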
2024-12-10T04:57:18,497 INFO [master/6578523f4421:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-10T04:57:18,497 INFO [master/6578523f4421:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733806698497 2024-12-10T04:57:18,497 INFO [master/6578523f4421:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733806758497 2024-12-10T04:57:18,497 INFO [master/6578523f4421:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-10T04:57:18,498 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,39099,1733806637136-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:18,498 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,39099,1733806637136-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:18,498 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,39099,1733806637136-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:18,498 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6578523f4421:39099, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:18,498 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:18,498 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:18,501 DEBUG [master/6578523f4421:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-10T04:57:18,503 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.050sec 2024-12-10T04:57:18,503 INFO [master/6578523f4421:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-10T04:57:18,503 INFO [master/6578523f4421:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-10T04:57:18,504 INFO [master/6578523f4421:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-10T04:57:18,504 INFO [master/6578523f4421:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-10T04:57:18,504 INFO [master/6578523f4421:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-10T04:57:18,504 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,39099,1733806637136-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-10T04:57:18,504 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,39099,1733806637136-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-10T04:57:18,506 DEBUG [master/6578523f4421:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-10T04:57:18,506 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-10T04:57:18,507 INFO [master/6578523f4421:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6578523f4421,39099,1733806637136-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T04:57:18,509 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@376b00bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T04:57:18,509 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6578523f4421,39099,-1 for getting cluster id 2024-12-10T04:57:18,510 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-10T04:57:18,510 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-10T04:57:18,510 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-10T04:57:18,511 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f91e6b47-d067-4027-a7b7-36c101a8d8e9' 2024-12-10T04:57:18,512 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-10T04:57:18,512 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-10T04:57:18,512 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f91e6b47-d067-4027-a7b7-36c101a8d8e9" 2024-12-10T04:57:18,512 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-10T04:57:18,512 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29b3cf77, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T04:57:18,513 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-10T04:57:18,513 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6578523f4421,39099,-1] 
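The client-side records here show the connection registry handing back the cluster id ('f91e6b47-...') and creating stubs against the master. From user code the equivalent information is reachable through the Admin API; a brief sketch, assuming a Connection created as in the previous example:

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public final class ClusterInfoSketch {
      static void printClusterInfo(Connection conn) throws Exception {
        try (Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          // Should correspond to the cluster id and active master the registry handed the client above.
          System.out.println("cluster id: " + metrics.getClusterId());
          System.out.println("active master: " + metrics.getMasterName());
          System.out.println("live region servers: " + metrics.getLiveServerMetrics().size());
        }
      }
    }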
2024-12-10T04:57:18,513 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-10T04:57:18,513 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-10T04:57:18,514 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T04:57:18,516 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44288, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-10T04:57:18,518 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a6e0fe9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T04:57:18,519 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-10T04:57:18,521 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6578523f4421,33029,1733806637309, seqNum=-1] 2024-12-10T04:57:18,521 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T04:57:18,525 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44944, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T04:57:18,530 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6578523f4421,39099,1733806637136 2024-12-10T04:57:18,531 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-10T04:57:18,533 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 6578523f4421,39099,1733806637136 2024-12-10T04:57:18,533 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@33cf2e9d 2024-12-10T04:57:18,534 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T04:57:18,536 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44294, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T04:57:18,537 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39099 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T04:57:18,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39099 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; 
CreateTableProcedure table=TestHBaseWalOnEC 2024-12-10T04:57:18,549 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T04:57:18,550 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:18,550 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39099 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-10T04:57:18,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39099 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T04:57:18,552 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T04:57:18,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741837_1013 (size=392) 2024-12-10T04:57:18,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741837_1013 (size=392) 2024-12-10T04:57:18,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741837_1013 (size=392) 2024-12-10T04:57:18,574 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f0b704266d11c838a151966509404c7d, NAME => 'TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589 2024-12-10T04:57:18,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741838_1014 (size=51) 2024-12-10T04:57:18,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741838_1014 (size=51) 2024-12-10T04:57:18,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741838_1014 (size=51) 2024-12-10T04:57:18,595 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T04:57:18,595 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing f0b704266d11c838a151966509404c7d, disabling compactions & flushes 2024-12-10T04:57:18,595 INFO 
[RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d. 2024-12-10T04:57:18,595 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d. 2024-12-10T04:57:18,595 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d. after waiting 0 ms 2024-12-10T04:57:18,595 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d. 2024-12-10T04:57:18,595 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d. 2024-12-10T04:57:18,595 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for f0b704266d11c838a151966509404c7d: Waiting for close lock at 1733806638595Disabling compacts and flushes for region at 1733806638595Disabling writes for close at 1733806638595Writing region close event to WAL at 1733806638595Closed at 1733806638595 2024-12-10T04:57:18,598 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T04:57:18,598 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733806638598"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733806638598"}]},"ts":"1733806638598"} 2024-12-10T04:57:18,603 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
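The create request logged above (HMaster$4: create 'TestHBaseWalOnEC' with REGION_REPLICATION => '1' and a single 'cf' family) maps onto the standard Admin/TableDescriptorBuilder API. A sketch of the equivalent client call, with connection handling assumed and the family left at its defaults, which is what the logged descriptor shows:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class CreateTestTable {
      static void createTable(Connection conn) throws Exception {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
            .setRegionReplication(1)                                  // REGION_REPLICATION => '1'
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))  // default family settings
            .build();
        try (Admin admin = conn.getAdmin()) {
          // On the master this drives the CreateTableProcedure (pid=4) seen in the log:
          // PRE_OPERATION -> WRITE_FS_LAYOUT -> ADD_TO_META -> ASSIGN_REGIONS -> UPDATE_DESC_CACHE -> POST_OPERATION.
          admin.createTable(desc);
        }
      }
    }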
2024-12-10T04:57:18,605 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T04:57:18,605 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733806638605"}]},"ts":"1733806638605"} 2024-12-10T04:57:18,613 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-10T04:57:18,613 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {6578523f4421=0} racks are {/default-rack=0} 2024-12-10T04:57:18,615 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-10T04:57:18,615 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-10T04:57:18,615 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-10T04:57:18,615 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-10T04:57:18,615 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-10T04:57:18,615 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-10T04:57:18,615 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-10T04:57:18,615 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-10T04:57:18,615 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-10T04:57:18,615 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-10T04:57:18,615 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=f0b704266d11c838a151966509404c7d, ASSIGN}] 2024-12-10T04:57:18,618 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=f0b704266d11c838a151966509404c7d, ASSIGN 2024-12-10T04:57:18,620 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=f0b704266d11c838a151966509404c7d, ASSIGN; state=OFFLINE, location=6578523f4421,46693,1733806637347; forceNewPlan=false, retain=false 2024-12-10T04:57:18,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39099 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T04:57:18,771 INFO [6578523f4421:39099 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
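Once the region is added to meta, the balancer picks a server (BalancerClusterState records above) and a TransitRegionStateProcedure/OpenRegionProcedure pair drives it to OPEN. The test harness simply blocks until that settles, which is the wait that appears later in this excerpt. A sketch of that wait, assuming HBaseTestingUtil (the testing utility named in the stack traces below) keeps the familiar waitUntilAllRegionsAssigned(TableName, timeout) overload:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;

    public final class WaitForAssignmentSketch {
      static void waitForAssignment(HBaseTestingUtil util) throws Exception {
        // Matches "Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms".
        util.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"), 60_000);
      }
    }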
2024-12-10T04:57:18,772 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f0b704266d11c838a151966509404c7d, regionState=OPENING, regionLocation=6578523f4421,46693,1733806637347 2024-12-10T04:57:18,777 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=f0b704266d11c838a151966509404c7d, ASSIGN because future has completed 2024-12-10T04:57:18,778 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f0b704266d11c838a151966509404c7d, server=6578523f4421,46693,1733806637347}] 2024-12-10T04:57:18,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39099 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T04:57:18,932 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T04:57:18,934 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42955, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T04:57:18,942 INFO [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d. 2024-12-10T04:57:18,942 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => f0b704266d11c838a151966509404c7d, NAME => 'TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d.', STARTKEY => '', ENDKEY => ''} 2024-12-10T04:57:18,943 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC f0b704266d11c838a151966509404c7d 2024-12-10T04:57:18,943 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T04:57:18,943 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for f0b704266d11c838a151966509404c7d 2024-12-10T04:57:18,943 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for f0b704266d11c838a151966509404c7d 2024-12-10T04:57:18,946 INFO [StoreOpener-f0b704266d11c838a151966509404c7d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f0b704266d11c838a151966509404c7d 2024-12-10T04:57:18,949 INFO [StoreOpener-f0b704266d11c838a151966509404c7d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f0b704266d11c838a151966509404c7d columnFamilyName cf 2024-12-10T04:57:18,949 DEBUG [StoreOpener-f0b704266d11c838a151966509404c7d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T04:57:18,950 INFO [StoreOpener-f0b704266d11c838a151966509404c7d-1 {}] regionserver.HStore(327): Store=f0b704266d11c838a151966509404c7d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T04:57:18,950 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for f0b704266d11c838a151966509404c7d 2024-12-10T04:57:18,952 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/default/TestHBaseWalOnEC/f0b704266d11c838a151966509404c7d 2024-12-10T04:57:18,952 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/default/TestHBaseWalOnEC/f0b704266d11c838a151966509404c7d 2024-12-10T04:57:18,953 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for f0b704266d11c838a151966509404c7d 2024-12-10T04:57:18,953 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for f0b704266d11c838a151966509404c7d 2024-12-10T04:57:18,955 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for f0b704266d11c838a151966509404c7d 2024-12-10T04:57:18,958 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/default/TestHBaseWalOnEC/f0b704266d11c838a151966509404c7d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T04:57:18,959 INFO [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened f0b704266d11c838a151966509404c7d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66761965, jitterRate=-0.005169197916984558}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T04:57:18,959 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f0b704266d11c838a151966509404c7d 2024-12-10T04:57:18,960 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for f0b704266d11c838a151966509404c7d: Running coprocessor pre-open hook at 1733806638943Writing region info on filesystem at 1733806638943Initializing all the Stores at 1733806638945 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733806638945Cleaning up temporary data from old regions at 1733806638953 (+8 ms)Running coprocessor post-open hooks at 1733806638959 (+6 ms)Region opened successfully at 1733806638960 (+1 ms) 2024-12-10T04:57:18,962 INFO [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d., pid=6, masterSystemTime=1733806638932 2024-12-10T04:57:18,967 DEBUG [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d. 2024-12-10T04:57:18,967 INFO [RS_OPEN_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d. 2024-12-10T04:57:18,968 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f0b704266d11c838a151966509404c7d, regionState=OPEN, openSeqNum=2, regionLocation=6578523f4421,46693,1733806637347 2024-12-10T04:57:18,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f0b704266d11c838a151966509404c7d, server=6578523f4421,46693,1733806637347 because future has completed 2024-12-10T04:57:18,981 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-10T04:57:18,981 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure f0b704266d11c838a151966509404c7d, server=6578523f4421,46693,1733806637347 in 198 msec 2024-12-10T04:57:18,987 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-10T04:57:18,987 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=f0b704266d11c838a151966509404c7d, ASSIGN in 366 msec 2024-12-10T04:57:18,989 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T04:57:18,989 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733806638989"}]},"ts":"1733806638989"} 2024-12-10T04:57:18,993 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-10T04:57:18,995 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T04:57:19,000 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 459 msec 2024-12-10T04:57:19,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39099 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-10T04:57:19,178 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-10T04:57:19,178 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-10T04:57:19,179 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T04:57:19,182 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-10T04:57:19,182 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-10T04:57:19,182 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-10T04:57:19,187 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d., hostname=6578523f4421,46693,1733806637347, seqNum=2] 2024-12-10T04:57:19,187 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T04:57:19,190 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49588, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T04:57:19,193 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39099 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-10T04:57:19,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39099 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-10T04:57:19,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39099 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T04:57:19,197 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-10T04:57:19,199 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T04:57:19,199 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T04:57:19,308 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39099 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T04:57:19,359 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46693 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-10T04:57:19,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d. 2024-12-10T04:57:19,361 INFO [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing f0b704266d11c838a151966509404c7d 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-10T04:57:19,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/default/TestHBaseWalOnEC/f0b704266d11c838a151966509404c7d/.tmp/cf/2437e994d6c7420793e468245fa517cb is 36, key is row/cf:cq/1733806639190/Put/seqid=0 2024-12-10T04:57:19,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741839_1015 (size=4787) 2024-12-10T04:57:19,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741839_1015 (size=4787) 2024-12-10T04:57:19,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741839_1015 (size=4787) 2024-12-10T04:57:19,401 INFO [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/default/TestHBaseWalOnEC/f0b704266d11c838a151966509404c7d/.tmp/cf/2437e994d6c7420793e468245fa517cb 2024-12-10T04:57:19,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/default/TestHBaseWalOnEC/f0b704266d11c838a151966509404c7d/.tmp/cf/2437e994d6c7420793e468245fa517cb as hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/default/TestHBaseWalOnEC/f0b704266d11c838a151966509404c7d/cf/2437e994d6c7420793e468245fa517cb 2024-12-10T04:57:19,431 INFO [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/default/TestHBaseWalOnEC/f0b704266d11c838a151966509404c7d/cf/2437e994d6c7420793e468245fa517cb, entries=1, sequenceid=5, filesize=4.7 K 2024-12-10T04:57:19,433 INFO [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for f0b704266d11c838a151966509404c7d in 71ms, sequenceid=5, compaction requested=false 2024-12-10T04:57:19,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 
{event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for f0b704266d11c838a151966509404c7d: 2024-12-10T04:57:19,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d. 2024-12-10T04:57:19,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6578523f4421:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-10T04:57:19,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39099 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-10T04:57:19,440 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-10T04:57:19,440 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 237 msec 2024-12-10T04:57:19,443 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 247 msec 2024-12-10T04:57:19,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39099 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-10T04:57:19,519 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-10T04:57:19,524 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-10T04:57:19,524 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
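The flush pass above (FlushTableProcedure pid=7 driving FlushRegionProcedure pid=8) was requested by the client after a single write; the flushed cell's key, row/cf:cq, is visible in the HFileWriterImpl record. A sketch of that write-then-flush sequence with the public API; the table name, family, and qualifier are taken from the log, while the value and connection handling are assumptions for illustration:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class WriteAndFlush {
      static void writeAndFlush(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        try (Table table = conn.getTable(tn); Admin admin = conn.getAdmin()) {
          // Single cell (row "row", family "cf", qualifier "cq"), as flushed above; the value is a placeholder.
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // Triggers FlushTableProcedure -> FlushRegionProcedure: the memstore is written to .tmp/cf and committed.
          admin.flush(tn);
        }
      }
    }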
2024-12-10T04:57:19,524 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T04:57:19,524 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
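The call stack above runs through TestHBaseWalOnEC.tearDown -> HBaseTestingUtil.shutdownMiniCluster, i.e. the standard minicluster lifecycle around a test. A minimal scaffold of that lifecycle, not the actual test source, assuming HBaseTestingUtil keeps the familiar start/shutdown methods and that three region servers are requested (the log reports RegionServer count=3):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Brings up master, 3 region servers, ZooKeeper and HDFS, as in "Minicluster is up; activeMaster=..."
        util.startMiniCluster(3);
      }

      @After
      public void tearDown() throws Exception {
        // Produces the shutdown sequence that follows in this log (connections closed, /hbase/running deleted).
        util.shutdownMiniCluster();
      }

      @Test
      public void placeholder() throws Exception {
        // Test body omitted; see the create/put/flush sketches earlier in this excerpt.
      }
    }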
2024-12-10T04:57:19,525 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T04:57:19,525 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-10T04:57:19,525 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=834583412, stopped=false 2024-12-10T04:57:19,525 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6578523f4421,39099,1733806637136 2024-12-10T04:57:19,525 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-10T04:57:19,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T04:57:19,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T04:57:19,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T04:57:19,789 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-10T04:57:19,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:19,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:19,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T04:57:19,790 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T04:57:19,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:19,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:19,790 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-10T04:57:19,791 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T04:57:19,791 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T04:57:19,791 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T04:57:19,791 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6578523f4421,33029,1733806637309' ***** 2024-12-10T04:57:19,791 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T04:57:19,791 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6578523f4421,46693,1733806637347' ***** 2024-12-10T04:57:19,792 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T04:57:19,792 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6578523f4421,45069,1733806637387' ***** 2024-12-10T04:57:19,792 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-10T04:57:19,792 INFO [RS:1;6578523f4421:46693 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T04:57:19,792 INFO [RS:2;6578523f4421:45069 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T04:57:19,792 INFO [RS:1;6578523f4421:46693 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T04:57:19,792 INFO [RS:1;6578523f4421:46693 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T04:57:19,792 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T04:57:19,792 INFO [RS:1;6578523f4421:46693 {}] regionserver.HRegionServer(3091): Received CLOSE for f0b704266d11c838a151966509404c7d 2024-12-10T04:57:19,792 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T04:57:19,792 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T04:57:19,792 INFO [RS:2;6578523f4421:45069 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T04:57:19,792 INFO [RS:2;6578523f4421:45069 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T04:57:19,792 INFO [RS:2;6578523f4421:45069 {}] regionserver.HRegionServer(959): stopping server 6578523f4421,45069,1733806637387 2024-12-10T04:57:19,793 INFO [RS:2;6578523f4421:45069 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T04:57:19,793 INFO [RS:2;6578523f4421:45069 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;6578523f4421:45069. 
2024-12-10T04:57:19,793 INFO [RS:0;6578523f4421:33029 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T04:57:19,793 DEBUG [RS:2;6578523f4421:45069 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T04:57:19,793 INFO [RS:0;6578523f4421:33029 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T04:57:19,793 DEBUG [RS:2;6578523f4421:45069 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T04:57:19,793 INFO [RS:0;6578523f4421:33029 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T04:57:19,793 INFO [RS:0;6578523f4421:33029 {}] regionserver.HRegionServer(959): stopping server 6578523f4421,33029,1733806637309 2024-12-10T04:57:19,793 INFO [RS:0;6578523f4421:33029 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T04:57:19,793 INFO [RS:2;6578523f4421:45069 {}] regionserver.HRegionServer(976): stopping server 6578523f4421,45069,1733806637387; all regions closed. 2024-12-10T04:57:19,793 INFO [RS:0;6578523f4421:33029 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6578523f4421:33029. 
2024-12-10T04:57:19,793 DEBUG [RS:0;6578523f4421:33029 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T04:57:19,793 DEBUG [RS:0;6578523f4421:33029 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T04:57:19,793 INFO [RS:0;6578523f4421:33029 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T04:57:19,793 INFO [RS:0;6578523f4421:33029 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T04:57:19,793 INFO [RS:0;6578523f4421:33029 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T04:57:19,794 INFO [RS:0;6578523f4421:33029 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-10T04:57:19,794 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-10T04:57:19,794 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T04:57:19,794 INFO [RS:1;6578523f4421:46693 {}] regionserver.HRegionServer(959): stopping server 6578523f4421,46693,1733806637347 2024-12-10T04:57:19,794 INFO [RS:1;6578523f4421:46693 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T04:57:19,794 INFO [RS:1;6578523f4421:46693 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;6578523f4421:46693. 
2024-12-10T04:57:19,794 DEBUG [RS_CLOSE_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f0b704266d11c838a151966509404c7d, disabling compactions & flushes 2024-12-10T04:57:19,794 DEBUG [RS:1;6578523f4421:46693 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T04:57:19,795 INFO [RS_CLOSE_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d. 2024-12-10T04:57:19,795 DEBUG [RS:1;6578523f4421:46693 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T04:57:19,795 DEBUG [RS_CLOSE_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d. 2024-12-10T04:57:19,795 DEBUG [RS_CLOSE_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d. after waiting 0 ms 2024-12-10T04:57:19,795 INFO [RS:1;6578523f4421:46693 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-10T04:57:19,795 DEBUG [RS_CLOSE_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d. 
2024-12-10T04:57:19,795 DEBUG [RS:1;6578523f4421:46693 {}] regionserver.HRegionServer(1325): Online Regions={f0b704266d11c838a151966509404c7d=TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d.} 2024-12-10T04:57:19,795 DEBUG [RS:1;6578523f4421:46693 {}] regionserver.HRegionServer(1351): Waiting on f0b704266d11c838a151966509404c7d 2024-12-10T04:57:19,795 INFO [RS:0;6578523f4421:33029 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-10T04:57:19,795 DEBUG [RS:0;6578523f4421:33029 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-10T04:57:19,795 DEBUG [RS:0;6578523f4421:33029 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-10T04:57:19,795 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-10T04:57:19,796 INFO [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-10T04:57:19,796 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-10T04:57:19,796 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T04:57:19,796 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T04:57:19,796 INFO [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-10T04:57:19,799 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:19,799 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:19,799 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:19,801 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:19,802 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:19,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741834_1010 (size=93) 2024-12-10T04:57:19,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741834_1010 (size=93) 2024-12-10T04:57:19,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741834_1010 (size=93) 2024-12-10T04:57:19,809 DEBUG [RS_CLOSE_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/default/TestHBaseWalOnEC/f0b704266d11c838a151966509404c7d/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-10T04:57:19,810 INFO [RS_CLOSE_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d. 
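
[Editor's note] The entries above trace the region close protocol: a time-limited wait for the close lock, then "Updates disabled" before the final flush. The following is a minimal, generic Java sketch of that pattern, offered only as an illustration; the class and method names are assumptions and are not HBase's actual implementation.

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Generic sketch of a time-limited close lock: updaters hold the read lock,
    // close() takes the write lock with a timeout so shutdown cannot hang forever.
    public class CloseLockSketch {
        private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
        private volatile boolean closed;

        public void update() {
            lock.readLock().lock();          // many updaters may proceed concurrently
            try {
                if (closed) {
                    throw new IllegalStateException("already closed");
                }
                // ... apply the update ...
            } finally {
                lock.readLock().unlock();
            }
        }

        public boolean close(long timeout, TimeUnit unit) throws InterruptedException {
            // "Time limited wait for close lock": give up if updaters do not drain in time.
            if (!lock.writeLock().tryLock(timeout, unit)) {
                return false;                // caller decides whether to retry or abort
            }
            try {
                closed = true;               // corresponds to "Updates disabled for region ..."
                // ... flush and release resources ...
                return true;
            } finally {
                lock.writeLock().unlock();
            }
        }
    }

In the log above the lock is acquired "after waiting 0 ms" because no writers were active when close began.
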
2024-12-10T04:57:19,810 DEBUG [RS_CLOSE_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f0b704266d11c838a151966509404c7d: Waiting for close lock at 1733806639794Running coprocessor pre-close hooks at 1733806639794Disabling compacts and flushes for region at 1733806639794Disabling writes for close at 1733806639795 (+1 ms)Writing region close event to WAL at 1733806639799 (+4 ms)Running coprocessor post-close hooks at 1733806639810 (+11 ms)Closed at 1733806639810 2024-12-10T04:57:19,810 DEBUG [RS_CLOSE_REGION-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d. 2024-12-10T04:57:19,811 DEBUG [RS:2;6578523f4421:45069 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/oldWALs 2024-12-10T04:57:19,811 INFO [RS:2;6578523f4421:45069 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6578523f4421%2C45069%2C1733806637387:(num 1733806638099) 2024-12-10T04:57:19,811 DEBUG [RS:2;6578523f4421:45069 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T04:57:19,811 INFO [RS:2;6578523f4421:45069 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T04:57:19,811 INFO [RS:2;6578523f4421:45069 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T04:57:19,811 INFO [RS:2;6578523f4421:45069 {}] hbase.ChoreService(370): Chore service for: regionserver/6578523f4421:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-10T04:57:19,812 INFO [regionserver/6578523f4421:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-10T04:57:19,812 INFO [RS:2;6578523f4421:45069 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T04:57:19,812 INFO [RS:2;6578523f4421:45069 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T04:57:19,812 INFO [RS:2;6578523f4421:45069 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T04:57:19,812 INFO [RS:2;6578523f4421:45069 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T04:57:19,812 INFO [RS:2;6578523f4421:45069 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45069 2024-12-10T04:57:19,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T04:57:19,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6578523f4421,45069,1733806637387 2024-12-10T04:57:19,821 INFO [RS:2;6578523f4421:45069 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T04:57:19,821 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. 
java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$370/0x00007f91148f44c8@152d57f rejected from java.util.concurrent.ThreadPoolExecutor@44504b31[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-10T04:57:19,821 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740/.tmp/info/19adc04ea27949b795dfcfab0b4d1b0b is 153, key is TestHBaseWalOnEC,,1733806638537.f0b704266d11c838a151966509404c7d./info:regioninfo/1733806638967/Put/seqid=0 2024-12-10T04:57:19,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741840_1016 (size=6637) 2024-12-10T04:57:19,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741840_1016 (size=6637) 2024-12-10T04:57:19,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741840_1016 (size=6637) 2024-12-10T04:57:19,829 INFO [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740/.tmp/info/19adc04ea27949b795dfcfab0b4d1b0b 2024-12-10T04:57:19,831 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6578523f4421,45069,1733806637387] 2024-12-10T04:57:19,842 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6578523f4421,45069,1733806637387 already deleted, retry=false 2024-12-10T04:57:19,842 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6578523f4421,45069,1733806637387 expired; onlineServers=2 2024-12-10T04:57:19,853 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740/.tmp/ns/d86c45d433654b3a965cab67530245f3 is 43, key is default/ns:d/1733806638488/Put/seqid=0 2024-12-10T04:57:19,854 WARN [IPC Server handler 2 on default port 35491 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], 
replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-10T04:57:19,854 WARN [IPC Server handler 2 on default port 35491 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-10T04:57:19,854 WARN [IPC Server handler 2 on default port 35491 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-10T04:57:19,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741841_1017 (size=5153) 2024-12-10T04:57:19,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741841_1017 (size=5153) 2024-12-10T04:57:19,859 INFO [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740/.tmp/ns/d86c45d433654b3a965cab67530245f3 2024-12-10T04:57:19,863 INFO [regionserver/6578523f4421:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T04:57:19,870 INFO [regionserver/6578523f4421:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T04:57:19,875 INFO [regionserver/6578523f4421:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T04:57:19,885 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740/.tmp/table/278db7cb741a4e40b2eaca1205a3f616 is 52, key is TestHBaseWalOnEC/table:state/1733806638989/Put/seqid=0 2024-12-10T04:57:19,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741842_1018 (size=5249) 2024-12-10T04:57:19,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741842_1018 (size=5249) 2024-12-10T04:57:19,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741842_1018 (size=5249) 2024-12-10T04:57:19,893 INFO [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740/.tmp/table/278db7cb741a4e40b2eaca1205a3f616 2024-12-10T04:57:19,902 DEBUG 
[RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740/.tmp/info/19adc04ea27949b795dfcfab0b4d1b0b as hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740/info/19adc04ea27949b795dfcfab0b4d1b0b 2024-12-10T04:57:19,913 INFO [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740/info/19adc04ea27949b795dfcfab0b4d1b0b, entries=10, sequenceid=11, filesize=6.5 K 2024-12-10T04:57:19,914 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740/.tmp/ns/d86c45d433654b3a965cab67530245f3 as hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740/ns/d86c45d433654b3a965cab67530245f3 2024-12-10T04:57:19,924 INFO [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740/ns/d86c45d433654b3a965cab67530245f3, entries=2, sequenceid=11, filesize=5.0 K 2024-12-10T04:57:19,927 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740/.tmp/table/278db7cb741a4e40b2eaca1205a3f616 as hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740/table/278db7cb741a4e40b2eaca1205a3f616 2024-12-10T04:57:19,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T04:57:19,931 INFO [RS:2;6578523f4421:45069 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T04:57:19,931 INFO [RS:2;6578523f4421:45069 {}] regionserver.HRegionServer(1031): Exiting; stopping=6578523f4421,45069,1733806637387; zookeeper connection closed. 
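
[Editor's note] The RejectedExecutionException a few entries above comes from a ZooKeeper watcher event delivered after the ZKWatcher's executor had already terminated during shutdown. The standalone sketch below reproduces that JDK behaviour: under the default AbortPolicy, execute() on a shut-down ExecutorService throws RejectedExecutionException. It is an illustration of the failure mode, not HBase or ZooKeeper code.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.RejectedExecutionException;

    // Why the watcher callback fails: once an ExecutorService has been shut down,
    // submitting further work is rejected by the default AbortPolicy.
    public class RejectedAfterShutdown {
        public static void main(String[] args) {
            ExecutorService pool = Executors.newSingleThreadExecutor();
            pool.shutdown();                              // pool moves toward Terminated
            try {
                pool.execute(() -> System.out.println("late event"));
            } catch (RejectedExecutionException e) {
                // Same failure mode as the ZooKeeper event delivered after the
                // ZKWatcher's executor terminated during region server shutdown.
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }
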
2024-12-10T04:57:19,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45069-0x1000e15c6fc0003, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T04:57:19,933 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@70346efc {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@70346efc 2024-12-10T04:57:19,939 INFO [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740/table/278db7cb741a4e40b2eaca1205a3f616, entries=2, sequenceid=11, filesize=5.1 K 2024-12-10T04:57:19,941 INFO [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 144ms, sequenceid=11, compaction requested=false 2024-12-10T04:57:19,948 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-10T04:57:19,949 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-10T04:57:19,949 INFO [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-10T04:57:19,949 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733806639795Running coprocessor pre-close hooks at 1733806639795Disabling compacts and flushes for region at 1733806639795Disabling writes for close at 1733806639796 (+1 ms)Obtaining lock to block concurrent updates at 1733806639796Preparing flush snapshotting stores in 1588230740 at 1733806639796Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733806639797 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733806639798 (+1 ms)Flushing 1588230740/info: creating writer at 1733806639798Flushing 1588230740/info: appending metadata at 1733806639820 (+22 ms)Flushing 1588230740/info: closing flushed file at 1733806639820Flushing 1588230740/ns: creating writer at 1733806639837 (+17 ms)Flushing 1588230740/ns: appending metadata at 1733806639852 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1733806639852Flushing 1588230740/table: creating writer at 1733806639867 (+15 ms)Flushing 1588230740/table: appending metadata at 1733806639884 (+17 ms)Flushing 1588230740/table: closing flushed file at 1733806639884Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16021a52: reopening flushed file at 1733806639901 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6671e2b8: reopening flushed file at 1733806639913 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ff0c065: reopening flushed file at 1733806639924 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 
KB/3152, currentSize=0 B/0 for 1588230740 in 144ms, sequenceid=11, compaction requested=false at 1733806639941 (+17 ms)Writing region close event to WAL at 1733806639942 (+1 ms)Running coprocessor post-close hooks at 1733806639949 (+7 ms)Closed at 1733806639949 2024-12-10T04:57:19,950 DEBUG [RS_CLOSE_META-regionserver/6578523f4421:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-10T04:57:19,963 INFO [regionserver/6578523f4421:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-10T04:57:19,964 INFO [regionserver/6578523f4421:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-10T04:57:19,969 INFO [regionserver/6578523f4421:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-10T04:57:19,970 INFO [regionserver/6578523f4421:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-10T04:57:19,995 INFO [RS:1;6578523f4421:46693 {}] regionserver.HRegionServer(976): stopping server 6578523f4421,46693,1733806637347; all regions closed. 2024-12-10T04:57:19,995 INFO [RS:0;6578523f4421:33029 {}] regionserver.HRegionServer(976): stopping server 6578523f4421,33029,1733806637309; all regions closed. 2024-12-10T04:57:19,996 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:19,996 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:19,996 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:19,996 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:19,996 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:19,996 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:19,996 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:19,996 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:19,996 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:19,996 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:19,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741836_1012 (size=2751) 2024-12-10T04:57:19,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741835_1011 (size=1298) 2024-12-10T04:57:19,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741835_1011 (size=1298) 2024-12-10T04:57:19,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741835_1011 (size=1298) 2024-12-10T04:57:19,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741836_1012 (size=2751) 2024-12-10T04:57:19,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741836_1012 (size=2751) 2024-12-10T04:57:20,002 DEBUG [RS:0;6578523f4421:33029 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/oldWALs 2024-12-10T04:57:20,002 INFO [RS:0;6578523f4421:33029 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6578523f4421%2C33029%2C1733806637309.meta:.meta(num 1733806638430) 2024-12-10T04:57:20,002 DEBUG 
[RS:1;6578523f4421:46693 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/oldWALs 2024-12-10T04:57:20,003 INFO [RS:1;6578523f4421:46693 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6578523f4421%2C46693%2C1733806637347:(num 1733806638105) 2024-12-10T04:57:20,003 DEBUG [RS:1;6578523f4421:46693 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T04:57:20,003 INFO [RS:1;6578523f4421:46693 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T04:57:20,003 INFO [RS:1;6578523f4421:46693 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T04:57:20,003 INFO [RS:1;6578523f4421:46693 {}] hbase.ChoreService(370): Chore service for: regionserver/6578523f4421:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-10T04:57:20,003 INFO [RS:1;6578523f4421:46693 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T04:57:20,003 INFO [RS:1;6578523f4421:46693 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T04:57:20,003 INFO [regionserver/6578523f4421:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-10T04:57:20,003 INFO [RS:1;6578523f4421:46693 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T04:57:20,003 INFO [RS:1;6578523f4421:46693 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T04:57:20,003 INFO [RS:1;6578523f4421:46693 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46693 2024-12-10T04:57:20,004 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:20,004 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:20,006 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:20,006 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:20,006 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:20,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741833_1009 (size=93) 2024-12-10T04:57:20,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741833_1009 (size=93) 2024-12-10T04:57:20,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741833_1009 (size=93) 2024-12-10T04:57:20,013 DEBUG [RS:0;6578523f4421:33029 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/oldWALs 2024-12-10T04:57:20,013 INFO [RS:0;6578523f4421:33029 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6578523f4421%2C33029%2C1733806637309:(num 1733806638096) 2024-12-10T04:57:20,013 DEBUG [RS:0;6578523f4421:33029 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T04:57:20,013 INFO [RS:0;6578523f4421:33029 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T04:57:20,014 INFO [RS:0;6578523f4421:33029 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T04:57:20,014 INFO [RS:0;6578523f4421:33029 {}] hbase.ChoreService(370): Chore service for: regionserver/6578523f4421:0 had [ScheduledChore 
name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-10T04:57:20,014 INFO [RS:0;6578523f4421:33029 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T04:57:20,014 INFO [regionserver/6578523f4421:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-10T04:57:20,014 INFO [RS:0;6578523f4421:33029 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33029 2024-12-10T04:57:20,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6578523f4421,46693,1733806637347 2024-12-10T04:57:20,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T04:57:20,077 INFO [RS:1;6578523f4421:46693 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T04:57:20,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6578523f4421,33029,1733806637309 2024-12-10T04:57:20,103 INFO [RS:0;6578523f4421:33029 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T04:57:20,103 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6578523f4421,33029,1733806637309] 2024-12-10T04:57:20,126 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6578523f4421,33029,1733806637309 already deleted, retry=false 2024-12-10T04:57:20,126 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6578523f4421,33029,1733806637309 expired; onlineServers=1 2024-12-10T04:57:20,126 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6578523f4421,46693,1733806637347] 2024-12-10T04:57:20,136 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6578523f4421,46693,1733806637347 already deleted, retry=false 2024-12-10T04:57:20,137 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6578523f4421,46693,1733806637347 expired; onlineServers=0 2024-12-10T04:57:20,137 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6578523f4421,39099,1733806637136' ***** 2024-12-10T04:57:20,137 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-10T04:57:20,137 INFO [M:0;6578523f4421:39099 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-10T04:57:20,137 INFO [M:0;6578523f4421:39099 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-10T04:57:20,137 DEBUG [M:0;6578523f4421:39099 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-10T04:57:20,137 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
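
[Editor's note] The "Chore service for: ... had [ScheduledChore ...] on shutdown" entries above list the periodic tasks still registered when each server stops. Below is a hedged, generic sketch of such a chore scheduler built on the JDK's ScheduledExecutorService; the names and structure are assumptions for illustration, not the ChoreService implementation.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Generic sketch of a chore service: named periodic tasks registered with a
    // period, all cancelled when the service shuts down.
    public class ChoreServiceSketch {
        private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);

        public void schedule(String name, Runnable chore, long periodMillis) {
            scheduler.scheduleAtFixedRate(() -> {
                System.out.println("running chore " + name);
                chore.run();
            }, periodMillis, periodMillis, TimeUnit.MILLISECONDS);
        }

        public void shutdown() {
            // Outstanding chores are cancelled, mirroring the
            // "had [ScheduledChore ...] on shutdown" report above.
            scheduler.shutdownNow();
        }
    }
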
2024-12-10T04:57:20,137 DEBUG [M:0;6578523f4421:39099 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-10T04:57:20,137 DEBUG [master/6578523f4421:0:becomeActiveMaster-HFileCleaner.large.0-1733806637768 {}] cleaner.HFileCleaner(306): Exit Thread[master/6578523f4421:0:becomeActiveMaster-HFileCleaner.large.0-1733806637768,5,FailOnTimeoutGroup] 2024-12-10T04:57:20,137 INFO [M:0;6578523f4421:39099 {}] hbase.ChoreService(370): Chore service for: master/6578523f4421:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-10T04:57:20,137 INFO [M:0;6578523f4421:39099 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-10T04:57:20,138 DEBUG [M:0;6578523f4421:39099 {}] master.HMaster(1795): Stopping service threads 2024-12-10T04:57:20,138 INFO [M:0;6578523f4421:39099 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-10T04:57:20,138 INFO [M:0;6578523f4421:39099 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-10T04:57:20,138 DEBUG [master/6578523f4421:0:becomeActiveMaster-HFileCleaner.small.0-1733806637768 {}] cleaner.HFileCleaner(306): Exit Thread[master/6578523f4421:0:becomeActiveMaster-HFileCleaner.small.0-1733806637768,5,FailOnTimeoutGroup] 2024-12-10T04:57:20,138 INFO [M:0;6578523f4421:39099 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-10T04:57:20,138 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-10T04:57:20,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-10T04:57:20,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T04:57:20,147 DEBUG [M:0;6578523f4421:39099 {}] zookeeper.ZKUtil(347): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-10T04:57:20,147 WARN [M:0;6578523f4421:39099 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-10T04:57:20,148 INFO [M:0;6578523f4421:39099 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/.lastflushedseqids 2024-12-10T04:57:20,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741843_1019 (size=127) 2024-12-10T04:57:20,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741843_1019 (size=127) 2024-12-10T04:57:20,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741843_1019 (size=127) 2024-12-10T04:57:20,157 INFO [M:0;6578523f4421:39099 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-10T04:57:20,158 INFO [M:0;6578523f4421:39099 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-10T04:57:20,158 DEBUG [M:0;6578523f4421:39099 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T04:57:20,158 INFO [M:0;6578523f4421:39099 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T04:57:20,158 DEBUG [M:0;6578523f4421:39099 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T04:57:20,158 DEBUG [M:0;6578523f4421:39099 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-10T04:57:20,158 DEBUG [M:0;6578523f4421:39099 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T04:57:20,158 INFO [M:0;6578523f4421:39099 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-12-10T04:57:20,175 DEBUG [M:0;6578523f4421:39099 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a50e8556d6f748d1bc01e62d162cd7d9 is 82, key is hbase:meta,,1/info:regioninfo/1733806638465/Put/seqid=0 2024-12-10T04:57:20,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741844_1020 (size=5672) 2024-12-10T04:57:20,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741844_1020 (size=5672) 2024-12-10T04:57:20,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741844_1020 (size=5672) 2024-12-10T04:57:20,183 INFO [M:0;6578523f4421:39099 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a50e8556d6f748d1bc01e62d162cd7d9 2024-12-10T04:57:20,204 DEBUG [M:0;6578523f4421:39099 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ff49dede845b4adb9200633160140e11 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733806638998/Put/seqid=0 2024-12-10T04:57:20,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741845_1021 (size=6439) 2024-12-10T04:57:20,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741845_1021 (size=6439) 2024-12-10T04:57:20,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741845_1021 (size=6439) 2024-12-10T04:57:20,211 INFO [M:0;6578523f4421:39099 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), 
to=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ff49dede845b4adb9200633160140e11 2024-12-10T04:57:20,216 INFO [RS:1;6578523f4421:46693 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T04:57:20,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T04:57:20,216 INFO [RS:1;6578523f4421:46693 {}] regionserver.HRegionServer(1031): Exiting; stopping=6578523f4421,46693,1733806637347; zookeeper connection closed. 2024-12-10T04:57:20,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46693-0x1000e15c6fc0002, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T04:57:20,216 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@278b03f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@278b03f 2024-12-10T04:57:20,246 INFO [RS:0;6578523f4421:33029 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T04:57:20,246 INFO [RS:0;6578523f4421:33029 {}] regionserver.HRegionServer(1031): Exiting; stopping=6578523f4421,33029,1733806637309; zookeeper connection closed. 2024-12-10T04:57:20,246 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@db272c6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@db272c6 2024-12-10T04:57:20,246 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-10T04:57:20,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T04:57:20,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33029-0x1000e15c6fc0001, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T04:57:20,255 DEBUG [M:0;6578523f4421:39099 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/78bfce404c094bf7a50b6ce402533bcf is 69, key is 6578523f4421,33029,1733806637309/rs:state/1733806637873/Put/seqid=0 2024-12-10T04:57:20,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741846_1022 (size=5294) 2024-12-10T04:57:20,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741846_1022 (size=5294) 2024-12-10T04:57:20,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741846_1022 (size=5294) 2024-12-10T04:57:20,262 INFO [M:0;6578523f4421:39099 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/78bfce404c094bf7a50b6ce402533bcf 2024-12-10T04:57:20,267 
DEBUG [M:0;6578523f4421:39099 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a50e8556d6f748d1bc01e62d162cd7d9 as hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a50e8556d6f748d1bc01e62d162cd7d9 2024-12-10T04:57:20,274 INFO [M:0;6578523f4421:39099 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a50e8556d6f748d1bc01e62d162cd7d9, entries=8, sequenceid=72, filesize=5.5 K 2024-12-10T04:57:20,275 DEBUG [M:0;6578523f4421:39099 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ff49dede845b4adb9200633160140e11 as hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ff49dede845b4adb9200633160140e11 2024-12-10T04:57:20,281 INFO [M:0;6578523f4421:39099 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ff49dede845b4adb9200633160140e11, entries=8, sequenceid=72, filesize=6.3 K 2024-12-10T04:57:20,282 DEBUG [M:0;6578523f4421:39099 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/78bfce404c094bf7a50b6ce402533bcf as hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/78bfce404c094bf7a50b6ce402533bcf 2024-12-10T04:57:20,289 INFO [M:0;6578523f4421:39099 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35491/user/jenkins/test-data/b192e429-22d4-6af5-591b-60b27156c589/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/78bfce404c094bf7a50b6ce402533bcf, entries=3, sequenceid=72, filesize=5.2 K 2024-12-10T04:57:20,291 INFO [M:0;6578523f4421:39099 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=72, compaction requested=false 2024-12-10T04:57:20,292 INFO [M:0;6578523f4421:39099 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T04:57:20,292 DEBUG [M:0;6578523f4421:39099 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733806640158Disabling compacts and flushes for region at 1733806640158Disabling writes for close at 1733806640158Obtaining lock to block concurrent updates at 1733806640158Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733806640158Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1733806640158Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733806640159 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733806640159Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733806640175 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733806640175Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733806640189 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733806640203 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733806640203Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733806640217 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733806640255 (+38 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733806640255Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3331c175: reopening flushed file at 1733806640266 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16f83dc9: reopening flushed file at 1733806640274 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@229aa489: reopening flushed file at 1733806640281 (+7 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=72, compaction requested=false at 1733806640291 (+10 ms)Writing region close event to WAL at 1733806640292 (+1 ms)Closed at 1733806640292 2024-12-10T04:57:20,292 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:20,293 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:20,293 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:20,293 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:20,293 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-10T04:57:20,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42773 is added to blk_1073741830_1006 (size=32674) 2024-12-10T04:57:20,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41657 is added to blk_1073741830_1006 (size=32674) 2024-12-10T04:57:20,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39149 is added to blk_1073741830_1006 (size=32674) 2024-12-10T04:57:20,297 INFO [M:0;6578523f4421:39099 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-10T04:57:20,297 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
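
[Editor's note] The repeated "SyncRunner(477): interrupted" and "... exiting." entries reflect worker threads that treat interruption as their stop signal. The following generic Java sketch (not the actual FSHLog code) shows that shutdown idiom: block interruptibly, log the interrupt, restore the flag, and leave the loop.

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.TimeUnit;

    // Generic sketch of an interrupt-driven worker: it blocks on a queue and
    // treats interruption as the signal to stop, logging and exiting its loop.
    public class SyncRunnerSketch implements Runnable {
        private final BlockingQueue<Runnable> work = new LinkedBlockingQueue<>();

        @Override
        public void run() {
            while (!Thread.currentThread().isInterrupted()) {
                try {
                    Runnable task = work.poll(1, TimeUnit.SECONDS); // blocks, interruptibly
                    if (task != null) {
                        task.run();
                    }
                } catch (InterruptedException e) {
                    System.out.println("interrupted");   // mirrors "SyncRunner(477): interrupted"
                    Thread.currentThread().interrupt();  // restore the flag and fall out of the loop
                }
            }
            System.out.println("exiting");               // mirrors "LogRoller exiting."
        }
    }
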
2024-12-10T04:57:20,297 INFO [M:0;6578523f4421:39099 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39099 2024-12-10T04:57:20,297 INFO [M:0;6578523f4421:39099 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-10T04:57:20,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T04:57:20,405 INFO [M:0;6578523f4421:39099 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-10T04:57:20,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39099-0x1000e15c6fc0000, quorum=127.0.0.1:51167, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T04:57:20,408 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@bff0a43{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T04:57:20,409 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@19dff04d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T04:57:20,409 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T04:57:20,409 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@38da8210{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-10T04:57:20,409 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e0095f0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/hadoop.log.dir/,STOPPED} 2024-12-10T04:57:20,411 WARN [BP-1730488423-172.17.0.2-1733806634465 heartbeating to localhost/127.0.0.1:35491 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-10T04:57:20,411 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-10T04:57:20,411 WARN [BP-1730488423-172.17.0.2-1733806634465 heartbeating to localhost/127.0.0.1:35491 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1730488423-172.17.0.2-1733806634465 (Datanode Uuid f6f48f00-035b-4a1c-8b6c-08f1930da7b2) service to localhost/127.0.0.1:35491 2024-12-10T04:57:20,411 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-10T04:57:20,412 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/cluster_07576dd4-2f1d-984b-8307-16a4a5fff71c/data/data5/current/BP-1730488423-172.17.0.2-1733806634465 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T04:57:20,413 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/cluster_07576dd4-2f1d-984b-8307-16a4a5fff71c/data/data6/current/BP-1730488423-172.17.0.2-1733806634465 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T04:57:20,413 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-10T04:57:20,416 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6e5e4927{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T04:57:20,416 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1768a8c1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T04:57:20,416 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T04:57:20,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@bb1336{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-10T04:57:20,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b4297c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/hadoop.log.dir/,STOPPED} 2024-12-10T04:57:20,419 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-10T04:57:20,419 WARN [BP-1730488423-172.17.0.2-1733806634465 heartbeating to localhost/127.0.0.1:35491 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-10T04:57:20,419 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-10T04:57:20,419 WARN [BP-1730488423-172.17.0.2-1733806634465 heartbeating to localhost/127.0.0.1:35491 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1730488423-172.17.0.2-1733806634465 (Datanode Uuid 477fa2ae-3c0a-4e26-b71f-eedbdb12b430) service to localhost/127.0.0.1:35491 2024-12-10T04:57:20,419 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/cluster_07576dd4-2f1d-984b-8307-16a4a5fff71c/data/data3/current/BP-1730488423-172.17.0.2-1733806634465 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T04:57:20,420 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/cluster_07576dd4-2f1d-984b-8307-16a4a5fff71c/data/data4/current/BP-1730488423-172.17.0.2-1733806634465 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T04:57:20,420 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-10T04:57:20,423 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@38e5384{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T04:57:20,423 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7d6118e0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T04:57:20,423 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T04:57:20,423 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61a92fea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-10T04:57:20,423 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2807f8c2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/hadoop.log.dir/,STOPPED} 2024-12-10T04:57:20,424 WARN [BP-1730488423-172.17.0.2-1733806634465 heartbeating to localhost/127.0.0.1:35491 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-10T04:57:20,424 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-10T04:57:20,424 WARN [BP-1730488423-172.17.0.2-1733806634465 heartbeating to localhost/127.0.0.1:35491 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1730488423-172.17.0.2-1733806634465 (Datanode Uuid f7181527-9d5e-4803-8a1f-0c82a73076b9) service to localhost/127.0.0.1:35491 2024-12-10T04:57:20,424 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-10T04:57:20,425 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/cluster_07576dd4-2f1d-984b-8307-16a4a5fff71c/data/data1/current/BP-1730488423-172.17.0.2-1733806634465 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T04:57:20,425 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/cluster_07576dd4-2f1d-984b-8307-16a4a5fff71c/data/data2/current/BP-1730488423-172.17.0.2-1733806634465 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T04:57:20,425 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-10T04:57:20,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7aaeb6cf{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-10T04:57:20,430 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@686c9dd5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T04:57:20,430 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T04:57:20,431 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56aa9d3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-10T04:57:20,431 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a49b909{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dba231f3-a715-59c1-a486-bc826d044acb/hadoop.log.dir/,STOPPED} 2024-12-10T04:57:20,442 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-10T04:57:20,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-10T04:57:20,473 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=148 (was 86) - Thread LEAK? -, OpenFileDescriptor=518 (was 443) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=252 (was 239) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5115 (was 5368)
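
[Editor's note] The final ResourceChecker line compares thread, file-descriptor, and load figures before and after the test and flags possible leaks ("Thread=148 (was 86) - Thread LEAK?"). Below is a minimal sketch of that kind of before/after check, using only the JDK's ThreadMXBean; it is an assumption-laden illustration, not the real ResourceChecker.

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadMXBean;

    // Capture the live thread count before the test, compare after, and flag a
    // possible leak in the same style as the log line above.
    public class ThreadLeakCheckSketch {
        public static void main(String[] args) {
            ThreadMXBean threads = ManagementFactory.getThreadMXBean();
            int before = threads.getThreadCount();

            runTest();                                    // placeholder for the test body

            int after = threads.getThreadCount();
            if (after > before) {
                System.out.printf("Thread=%d (was %d) - Thread LEAK?%n", after, before);
            }
        }

        private static void runTest() {
            // ... start and stop the mini cluster, run assertions ...
        }
    }
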