2024-11-09 03:51:47,488 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-09 03:51:47,501 main DEBUG Took 0.010877 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-09 03:51:47,501 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-09 03:51:47,501 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-09 03:51:47,502 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-09 03:51:47,504 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 03:51:47,515 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-09 03:51:47,535 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 03:51:47,536 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 03:51:47,537 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 03:51:47,538 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 03:51:47,539 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 03:51:47,539 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 03:51:47,541 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 03:51:47,541 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 03:51:47,542 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 03:51:47,542 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 03:51:47,543 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 03:51:47,544 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 03:51:47,545 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 03:51:47,545 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-09 03:51:47,546 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 03:51:47,546 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 03:51:47,547 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 03:51:47,548 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 03:51:47,548 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 03:51:47,549 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 03:51:47,549 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 03:51:47,550 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 03:51:47,551 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 03:51:47,551 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 03:51:47,552 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 03:51:47,552 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-09 03:51:47,554 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 03:51:47,556 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-09 03:51:47,559 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-09 03:51:47,560 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-09 03:51:47,561 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-09 03:51:47,562 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-09 03:51:47,574 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-09 03:51:47,577 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-09 03:51:47,579 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-09 03:51:47,580 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-09 03:51:47,581 main DEBUG createAppenders(={Console}) 2024-11-09 03:51:47,582 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-11-09 03:51:47,582 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-09 03:51:47,583 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-11-09 03:51:47,584 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-09 03:51:47,584 main DEBUG OutputStream closed 2024-11-09 03:51:47,584 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-09 03:51:47,585 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-09 03:51:47,585 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-11-09 03:51:47,670 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-09 03:51:47,672 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-09 03:51:47,673 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-09 03:51:47,674 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-09 03:51:47,675 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-09 03:51:47,676 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-09 03:51:47,676 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-09 03:51:47,677 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-09 03:51:47,677 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-09 03:51:47,677 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-09 03:51:47,677 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-09 03:51:47,678 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-09 03:51:47,678 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-09 03:51:47,678 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-09 03:51:47,679 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-09 03:51:47,679 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-09 03:51:47,679 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-09 03:51:47,680 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-09 03:51:47,682 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-09 03:51:47,683 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-11-09 03:51:47,683 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-09 03:51:47,684 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-11-09T03:51:47,703 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-11-09 03:51:47,705 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-09 03:51:47,706 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-09T03:51:47,961 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd 2024-11-09T03:51:47,986 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/cluster_ef556e58-c003-7843-94e8-d7c058f624bc, deleteOnExit=true 2024-11-09T03:51:47,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/test.cache.data in system properties and HBase conf 2024-11-09T03:51:47,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/hadoop.tmp.dir in system properties and HBase conf 2024-11-09T03:51:47,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/hadoop.log.dir in system properties and HBase conf 2024-11-09T03:51:47,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-09T03:51:47,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-09T03:51:47,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-09T03:51:48,071 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-09T03:51:48,150 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-09T03:51:48,154 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-09T03:51:48,155 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-09T03:51:48,155 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-09T03:51:48,156 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T03:51:48,156 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-09T03:51:48,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-09T03:51:48,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T03:51:48,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T03:51:48,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-09T03:51:48,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/nfs.dump.dir in system properties and HBase conf 2024-11-09T03:51:48,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/java.io.tmpdir in system properties and HBase conf 2024-11-09T03:51:48,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T03:51:48,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-09T03:51:48,160 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-09T03:51:49,251 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-09T03:51:49,322 INFO [Time-limited test {}] log.Log(170): Logging initialized @2525ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-09T03:51:49,389 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T03:51:49,455 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T03:51:49,479 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T03:51:49,479 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T03:51:49,480 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T03:51:49,494 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T03:51:49,497 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@760c69c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/hadoop.log.dir/,AVAILABLE} 2024-11-09T03:51:49,498 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ce709a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T03:51:49,698 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62d6efd9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/java.io.tmpdir/jetty-localhost-46279-hadoop-hdfs-3_4_1-tests_jar-_-any-4784038849068274043/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-09T03:51:49,707 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@353d35a1{HTTP/1.1, (http/1.1)}{localhost:46279} 2024-11-09T03:51:49,707 INFO [Time-limited test {}] server.Server(415): Started @2911ms 2024-11-09T03:51:50,420 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T03:51:50,428 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T03:51:50,429 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T03:51:50,429 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T03:51:50,429 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T03:51:50,430 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a5de9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/hadoop.log.dir/,AVAILABLE} 2024-11-09T03:51:50,431 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69893329{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T03:51:50,528 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1b97a472{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/java.io.tmpdir/jetty-localhost-44209-hadoop-hdfs-3_4_1-tests_jar-_-any-1718114075590823447/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T03:51:50,528 
INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3722a29b{HTTP/1.1, (http/1.1)}{localhost:44209} 2024-11-09T03:51:50,529 INFO [Time-limited test {}] server.Server(415): Started @3733ms 2024-11-09T03:51:50,576 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T03:51:50,678 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T03:51:50,683 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T03:51:50,685 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T03:51:50,685 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T03:51:50,686 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-09T03:51:50,687 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@510fec09{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/hadoop.log.dir/,AVAILABLE} 2024-11-09T03:51:50,687 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40eb7053{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T03:51:50,797 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@353955e9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/java.io.tmpdir/jetty-localhost-35101-hadoop-hdfs-3_4_1-tests_jar-_-any-7602937217606269002/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T03:51:50,798 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11738cd8{HTTP/1.1, (http/1.1)}{localhost:35101} 2024-11-09T03:51:50,798 INFO [Time-limited test {}] server.Server(415): Started @4003ms 2024-11-09T03:51:50,801 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T03:51:50,841 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T03:51:50,847 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T03:51:50,850 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T03:51:50,850 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T03:51:50,851 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-09T03:51:50,853 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16cd567f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/hadoop.log.dir/,AVAILABLE} 2024-11-09T03:51:50,853 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5822645a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T03:51:50,977 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3114ae69{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/java.io.tmpdir/jetty-localhost-44529-hadoop-hdfs-3_4_1-tests_jar-_-any-7648442362183692464/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T03:51:50,978 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c70a874{HTTP/1.1, (http/1.1)}{localhost:44529} 2024-11-09T03:51:50,978 INFO [Time-limited test {}] server.Server(415): Started @4182ms 2024-11-09T03:51:50,980 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-09T03:51:52,640 WARN [Thread-127 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/cluster_ef556e58-c003-7843-94e8-d7c058f624bc/data/data6/current/BP-866353255-172.17.0.2-1731124308682/current, will proceed with Du for space computation calculation, 2024-11-09T03:51:52,640 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/cluster_ef556e58-c003-7843-94e8-d7c058f624bc/data/data3/current/BP-866353255-172.17.0.2-1731124308682/current, will proceed with Du for space computation calculation, 2024-11-09T03:51:52,640 WARN [Thread-126 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/cluster_ef556e58-c003-7843-94e8-d7c058f624bc/data/data5/current/BP-866353255-172.17.0.2-1731124308682/current, will proceed with Du for space computation calculation, 2024-11-09T03:51:52,640 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/cluster_ef556e58-c003-7843-94e8-d7c058f624bc/data/data4/current/BP-866353255-172.17.0.2-1731124308682/current, will proceed with Du for space computation calculation, 2024-11-09T03:51:52,640 WARN [Thread-129 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/cluster_ef556e58-c003-7843-94e8-d7c058f624bc/data/data2/current/BP-866353255-172.17.0.2-1731124308682/current, will proceed with Du for space computation calculation, 2024-11-09T03:51:52,640 WARN [Thread-128 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/cluster_ef556e58-c003-7843-94e8-d7c058f624bc/data/data1/current/BP-866353255-172.17.0.2-1731124308682/current, will proceed with Du for space computation calculation, 2024-11-09T03:51:52,685 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-09T03:51:52,686 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-09T03:51:52,686 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-09T03:51:52,738 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xde19704bf36bbf57 with lease ID 0x39c5bde210f35e64: Processing first storage report for DS-e95ebd8a-43f7-4fc3-ac35-36c0df3cb523 from datanode DatanodeRegistration(127.0.0.1:43673, datanodeUuid=ca727c6b-311d-4478-9efd-8be485229669, infoPort=39759, infoSecurePort=0, ipcPort=46697, storageInfo=lv=-57;cid=testClusterID;nsid=1905884561;c=1731124308682) 2024-11-09T03:51:52,739 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xde19704bf36bbf57 with lease ID 0x39c5bde210f35e64: from storage DS-e95ebd8a-43f7-4fc3-ac35-36c0df3cb523 node DatanodeRegistration(127.0.0.1:43673, datanodeUuid=ca727c6b-311d-4478-9efd-8be485229669, infoPort=39759, infoSecurePort=0, ipcPort=46697, storageInfo=lv=-57;cid=testClusterID;nsid=1905884561;c=1731124308682), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-09T03:51:52,740 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb66ffde2cd887d9 with lease ID 0x39c5bde210f35e62: Processing first storage report for DS-1e46231d-1d87-4366-b275-6227886af66c from datanode DatanodeRegistration(127.0.0.1:41627, datanodeUuid=b3abe79e-5dc7-4d98-99aa-eb6621fda9bb, infoPort=37191, infoSecurePort=0, ipcPort=45027, storageInfo=lv=-57;cid=testClusterID;nsid=1905884561;c=1731124308682) 2024-11-09T03:51:52,740 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb66ffde2cd887d9 with lease ID 0x39c5bde210f35e62: from storage DS-1e46231d-1d87-4366-b275-6227886af66c node DatanodeRegistration(127.0.0.1:41627, datanodeUuid=b3abe79e-5dc7-4d98-99aa-eb6621fda9bb, infoPort=37191, infoSecurePort=0, ipcPort=45027, storageInfo=lv=-57;cid=testClusterID;nsid=1905884561;c=1731124308682), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T03:51:52,740 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x82eda1800ffeecf7 with lease ID 0x39c5bde210f35e63: Processing first storage report for DS-4a47d01f-c5d1-4b79-b26f-bf0d0df6cd70 from datanode DatanodeRegistration(127.0.0.1:37567, datanodeUuid=9c4aa0e5-c185-4e8d-9e7f-791e404231a0, infoPort=42959, infoSecurePort=0, ipcPort=33023, storageInfo=lv=-57;cid=testClusterID;nsid=1905884561;c=1731124308682) 2024-11-09T03:51:52,740 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x82eda1800ffeecf7 with lease ID 0x39c5bde210f35e63: from storage DS-4a47d01f-c5d1-4b79-b26f-bf0d0df6cd70 node DatanodeRegistration(127.0.0.1:37567, datanodeUuid=9c4aa0e5-c185-4e8d-9e7f-791e404231a0, infoPort=42959, infoSecurePort=0, ipcPort=33023, storageInfo=lv=-57;cid=testClusterID;nsid=1905884561;c=1731124308682), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T03:51:52,741 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xde19704bf36bbf57 with lease ID 0x39c5bde210f35e64: Processing first storage report for DS-91a36a16-316b-4c7b-b1d2-d84eaded136a from datanode DatanodeRegistration(127.0.0.1:43673, datanodeUuid=ca727c6b-311d-4478-9efd-8be485229669, infoPort=39759, infoSecurePort=0, ipcPort=46697, storageInfo=lv=-57;cid=testClusterID;nsid=1905884561;c=1731124308682) 2024-11-09T03:51:52,741 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xde19704bf36bbf57 with lease ID 0x39c5bde210f35e64: from storage DS-91a36a16-316b-4c7b-b1d2-d84eaded136a node DatanodeRegistration(127.0.0.1:43673, datanodeUuid=ca727c6b-311d-4478-9efd-8be485229669, infoPort=39759, infoSecurePort=0, ipcPort=46697, storageInfo=lv=-57;cid=testClusterID;nsid=1905884561;c=1731124308682), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T03:51:52,741 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb66ffde2cd887d9 with lease ID 0x39c5bde210f35e62: Processing first storage report for DS-f483abf7-d7cb-4354-a6b7-f3a672f2cbe3 from datanode DatanodeRegistration(127.0.0.1:41627, datanodeUuid=b3abe79e-5dc7-4d98-99aa-eb6621fda9bb, infoPort=37191, infoSecurePort=0, ipcPort=45027, storageInfo=lv=-57;cid=testClusterID;nsid=1905884561;c=1731124308682) 2024-11-09T03:51:52,741 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb66ffde2cd887d9 with lease ID 0x39c5bde210f35e62: from storage DS-f483abf7-d7cb-4354-a6b7-f3a672f2cbe3 node DatanodeRegistration(127.0.0.1:41627, datanodeUuid=b3abe79e-5dc7-4d98-99aa-eb6621fda9bb, infoPort=37191, infoSecurePort=0, ipcPort=45027, storageInfo=lv=-57;cid=testClusterID;nsid=1905884561;c=1731124308682), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T03:51:52,742 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x82eda1800ffeecf7 with lease ID 0x39c5bde210f35e63: Processing first storage report for DS-2c926850-d9c1-4bd2-b725-0ae81275c41e from datanode DatanodeRegistration(127.0.0.1:37567, datanodeUuid=9c4aa0e5-c185-4e8d-9e7f-791e404231a0, infoPort=42959, infoSecurePort=0, ipcPort=33023, storageInfo=lv=-57;cid=testClusterID;nsid=1905884561;c=1731124308682) 2024-11-09T03:51:52,742 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x82eda1800ffeecf7 with lease ID 0x39c5bde210f35e63: from storage DS-2c926850-d9c1-4bd2-b725-0ae81275c41e node DatanodeRegistration(127.0.0.1:37567, datanodeUuid=9c4aa0e5-c185-4e8d-9e7f-791e404231a0, infoPort=42959, infoSecurePort=0, ipcPort=33023, storageInfo=lv=-57;cid=testClusterID;nsid=1905884561;c=1731124308682), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T03:51:52,803 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd 2024-11-09T03:51:52,872 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-11-09T03:51:52,931 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=163, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=151, ProcessCount=11, AvailableMemoryMB=6518 2024-11-09T03:51:52,934 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-09T03:51:52,945 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-11-09T03:51:53,021 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/cluster_ef556e58-c003-7843-94e8-d7c058f624bc/zookeeper_0, clientPort=49864, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/cluster_ef556e58-c003-7843-94e8-d7c058f624bc/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/cluster_ef556e58-c003-7843-94e8-d7c058f624bc/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-09T03:51:53,030 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49864 2024-11-09T03:51:53,042 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:51:53,046 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:51:53,146 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:53,146 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:53,186 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1989308819_22 at /127.0.0.1:45010 [Receiving block BP-866353255-172.17.0.2-1731124308682:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:43673:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45010 dst: /127.0.0.1:43673 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T03:51:53,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43673 is added to blk_-9223372036854775792_1002 (size=7) 2024-11-09T03:51:53,606 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T03:51:53,618 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a with version=8 2024-11-09T03:51:53,618 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/hbase-staging 2024-11-09T03:51:53,699 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-09T03:51:53,958 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6ee74a15f3e3:0 server-side Connection retries=45 2024-11-09T03:51:53,968 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T03:51:53,968 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T03:51:53,973 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T03:51:53,973 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T03:51:53,973 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T03:51:54,111 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-09T03:51:54,170 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-09T03:51:54,179 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-09T03:51:54,182 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T03:51:54,208 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 97061 (auto-detected) 2024-11-09T03:51:54,209 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-09T03:51:54,227 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36333 2024-11-09T03:51:54,246 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36333 connecting to ZooKeeper ensemble=127.0.0.1:49864 2024-11-09T03:51:54,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:363330x0, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T03:51:54,503 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36333-0x1011db8b7570000 connected 2024-11-09T03:51:54,599 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:51:54,604 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:51:54,615 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T03:51:54,620 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a, hbase.cluster.distributed=false 2024-11-09T03:51:54,647 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T03:51:54,652 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36333 2024-11-09T03:51:54,652 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36333 2024-11-09T03:51:54,653 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36333 2024-11-09T03:51:54,653 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36333 2024-11-09T03:51:54,654 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36333 2024-11-09T03:51:54,744 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6ee74a15f3e3:0 server-side Connection retries=45 2024-11-09T03:51:54,746 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T03:51:54,746 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T03:51:54,746 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T03:51:54,747 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T03:51:54,747 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T03:51:54,749 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T03:51:54,752 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T03:51:54,753 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36499 2024-11-09T03:51:54,755 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36499 connecting to ZooKeeper ensemble=127.0.0.1:49864 2024-11-09T03:51:54,756 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:51:54,758 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:51:54,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:364990x0, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T03:51:54,775 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36499-0x1011db8b7570001 connected 2024-11-09T03:51:54,775 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T03:51:54,780 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T03:51:54,788 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T03:51:54,790 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T03:51:54,795 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T03:51:54,796 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36499 2024-11-09T03:51:54,796 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, 
port=36499 2024-11-09T03:51:54,798 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36499 2024-11-09T03:51:54,800 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36499 2024-11-09T03:51:54,800 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36499 2024-11-09T03:51:54,817 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6ee74a15f3e3:0 server-side Connection retries=45 2024-11-09T03:51:54,817 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T03:51:54,817 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T03:51:54,818 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T03:51:54,818 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T03:51:54,818 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T03:51:54,818 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T03:51:54,818 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T03:51:54,819 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37985 2024-11-09T03:51:54,821 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37985 connecting to ZooKeeper ensemble=127.0.0.1:49864 2024-11-09T03:51:54,822 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:51:54,825 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:51:54,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:379850x0, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T03:51:54,840 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:379850x0, quorum=127.0.0.1:49864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T03:51:54,840 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37985-0x1011db8b7570002 connected 2024-11-09T03:51:54,841 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, 
blockSize=64 KB 2024-11-09T03:51:54,842 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T03:51:54,843 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37985-0x1011db8b7570002, quorum=127.0.0.1:49864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T03:51:54,845 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37985-0x1011db8b7570002, quorum=127.0.0.1:49864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T03:51:54,846 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37985 2024-11-09T03:51:54,846 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37985 2024-11-09T03:51:54,847 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37985 2024-11-09T03:51:54,847 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37985 2024-11-09T03:51:54,848 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37985 2024-11-09T03:51:54,863 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6ee74a15f3e3:0 server-side Connection retries=45 2024-11-09T03:51:54,864 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T03:51:54,864 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T03:51:54,864 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T03:51:54,864 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T03:51:54,864 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T03:51:54,865 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T03:51:54,865 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T03:51:54,866 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41089 2024-11-09T03:51:54,867 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41089 connecting to ZooKeeper ensemble=127.0.0.1:49864 2024-11-09T03:51:54,868 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:51:54,871 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:51:54,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:410890x0, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T03:51:54,882 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:410890x0, quorum=127.0.0.1:49864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T03:51:54,882 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41089-0x1011db8b7570003 connected 2024-11-09T03:51:54,883 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T03:51:54,884 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T03:51:54,885 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41089-0x1011db8b7570003, quorum=127.0.0.1:49864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T03:51:54,887 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41089-0x1011db8b7570003, quorum=127.0.0.1:49864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T03:51:54,889 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41089 2024-11-09T03:51:54,890 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41089 2024-11-09T03:51:54,891 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41089 2024-11-09T03:51:54,891 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41089 2024-11-09T03:51:54,892 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41089 2024-11-09T03:51:54,910 DEBUG [M:0;6ee74a15f3e3:36333 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6ee74a15f3e3:36333 2024-11-09T03:51:54,911 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6ee74a15f3e3,36333,1731124313807 2024-11-09T03:51:54,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T03:51:54,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011db8b7570002, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T03:51:54,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41089-0x1011db8b7570003, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-11-09T03:51:54,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T03:51:54,926 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6ee74a15f3e3,36333,1731124313807 2024-11-09T03:51:54,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41089-0x1011db8b7570003, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T03:51:54,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:54,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011db8b7570002, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T03:51:54,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41089-0x1011db8b7570003, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:54,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T03:51:54,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011db8b7570002, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:54,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:54,956 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-09T03:51:54,958 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6ee74a15f3e3,36333,1731124313807 from backup master directory 2024-11-09T03:51:54,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6ee74a15f3e3,36333,1731124313807 2024-11-09T03:51:54,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T03:51:54,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41089-0x1011db8b7570003, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-11-09T03:51:54,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011db8b7570002, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T03:51:54,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T03:51:54,967 WARN [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-09T03:51:54,967 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6ee74a15f3e3,36333,1731124313807 2024-11-09T03:51:54,969 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-09T03:51:54,971 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-09T03:51:55,034 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/hbase.id] with ID: 29e002d1-a5b1-412f-beee-774332e96567 2024-11-09T03:51:55,034 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/.tmp/hbase.id 2024-11-09T03:51:55,041 WARN [master/6ee74a15f3e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:55,041 WARN [master/6ee74a15f3e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:55,044 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1989308819_22 at /127.0.0.1:52494 [Receiving block BP-866353255-172.17.0.2-1731124308682:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:37567:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52494 dst: /127.0.0.1:37567 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T03:51:55,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37567 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-09T03:51:55,051 WARN [master/6ee74a15f3e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T03:51:55,051 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/.tmp/hbase.id]:[hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/hbase.id] 2024-11-09T03:51:55,099 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:51:55,106 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-09T03:51:55,124 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-11-09T03:51:55,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:55,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:55,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41089-0x1011db8b7570003, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:55,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011db8b7570002, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:55,148 WARN [master/6ee74a15f3e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:55,148 WARN [master/6ee74a15f3e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:55,151 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1989308819_22 at /127.0.0.1:50980 [Receiving block BP-866353255-172.17.0.2-1731124308682:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:41627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50980 dst: /127.0.0.1:41627 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T03:51:55,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41627 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-09T03:51:55,158 WARN [master/6ee74a15f3e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T03:51:55,174 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T03:51:55,176 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-09T03:51:55,181 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T03:51:55,207 WARN [master/6ee74a15f3e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:55,207 WARN [master/6ee74a15f3e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:55,210 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1989308819_22 at /127.0.0.1:52514 [Receiving block BP-866353255-172.17.0.2-1731124308682:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:37567:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52514 dst: /127.0.0.1:37567 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T03:51:55,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37567 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-09T03:51:55,216 WARN [master/6ee74a15f3e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T03:51:55,234 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/data/master/store 2024-11-09T03:51:55,249 WARN [master/6ee74a15f3e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:55,250 WARN [master/6ee74a15f3e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:55,252 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1989308819_22 at /127.0.0.1:52528 [Receiving block BP-866353255-172.17.0.2-1731124308682:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37567:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52528 dst: /127.0.0.1:37567 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T03:51:55,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37567 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-09T03:51:55,259 WARN [master/6ee74a15f3e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T03:51:55,263 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-09T03:51:55,266 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T03:51:55,267 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T03:51:55,267 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T03:51:55,268 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T03:51:55,269 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-09T03:51:55,269 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T03:51:55,269 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T03:51:55,271 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731124315267Disabling compacts and flushes for region at 1731124315267Disabling writes for close at 1731124315269 (+2 ms)Writing region close event to WAL at 1731124315269Closed at 1731124315269 2024-11-09T03:51:55,273 WARN [master/6ee74a15f3e3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/data/master/store/.initializing 2024-11-09T03:51:55,274 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/WALs/6ee74a15f3e3,36333,1731124313807 2024-11-09T03:51:55,281 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T03:51:55,296 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ee74a15f3e3%2C36333%2C1731124313807, suffix=, logDir=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/WALs/6ee74a15f3e3,36333,1731124313807, archiveDir=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/oldWALs, maxLogs=10 2024-11-09T03:51:55,324 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/WALs/6ee74a15f3e3,36333,1731124313807/6ee74a15f3e3%2C36333%2C1731124313807.1731124315300, exclude list is [], retry=0 2024-11-09T03:51:55,343 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:414) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:473) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:468) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T03:51:55,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37567,DS-4a47d01f-c5d1-4b79-b26f-bf0d0df6cd70,DISK] 2024-11-09T03:51:55,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43673,DS-e95ebd8a-43f7-4fc3-ac35-36c0df3cb523,DISK] 2024-11-09T03:51:55,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41627,DS-1e46231d-1d87-4366-b275-6227886af66c,DISK] 2024-11-09T03:51:55,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-09T03:51:55,383 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/WALs/6ee74a15f3e3,36333,1731124313807/6ee74a15f3e3%2C36333%2C1731124313807.1731124315300 2024-11-09T03:51:55,384 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42959:42959),(127.0.0.1/127.0.0.1:39759:39759),(127.0.0.1/127.0.0.1:37191:37191)] 2024-11-09T03:51:55,385 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-09T03:51:55,385 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T03:51:55,388 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:51:55,389 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:51:55,428 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:51:55,451 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-09T03:51:55,454 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:51:55,456 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T03:51:55,456 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:51:55,460 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-09T03:51:55,460 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:51:55,461 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T03:51:55,461 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:51:55,464 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-09T03:51:55,464 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:51:55,465 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T03:51:55,465 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:51:55,468 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-09T03:51:55,468 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:51:55,469 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T03:51:55,469 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:51:55,473 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:51:55,474 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:51:55,479 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:51:55,479 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:51:55,482 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T03:51:55,486 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:51:55,492 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T03:51:55,494 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66424892, jitterRate=-0.01019197702407837}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T03:51:55,502 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731124315402Initializing all the Stores at 1731124315404 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731124315405 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731124315405Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731124315406 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731124315406Cleaning up temporary data from old regions at 1731124315479 (+73 ms)Region opened successfully at 1731124315502 (+23 ms) 2024-11-09T03:51:55,503 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-09T03:51:55,534 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@672621fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ee74a15f3e3/172.17.0.2:0 2024-11-09T03:51:55,560 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-09T03:51:55,569 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-09T03:51:55,569 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-09T03:51:55,572 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-09T03:51:55,573 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-09T03:51:55,578 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-09T03:51:55,578 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-09T03:51:55,601 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-09T03:51:55,608 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-09T03:51:55,658 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-09T03:51:55,662 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-09T03:51:55,665 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-09T03:51:55,670 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-09T03:51:55,673 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-09T03:51:55,676 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-09T03:51:55,681 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-09T03:51:55,682 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-09T03:51:55,692 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-09T03:51:55,713 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-09T03:51:55,723 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-09T03:51:55,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37567 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-09T03:51:55,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41627 is added to blk_-9223372036854775789_1002 (size=7) 2024-11-09T03:51:55,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41089-0x1011db8b7570003, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T03:51:55,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T03:51:55,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41089-0x1011db8b7570003, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:55,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:55,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011db8b7570002, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T03:51:55,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011db8b7570002, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:55,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T03:51:55,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:55,738 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6ee74a15f3e3,36333,1731124313807, sessionid=0x1011db8b7570000, setting cluster-up flag (Was=false) 2024-11-09T03:51:55,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:55,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011db8b7570002, 
quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:55,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:55,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41089-0x1011db8b7570003, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:55,798 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-09T03:51:55,803 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6ee74a15f3e3,36333,1731124313807 2024-11-09T03:51:55,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011db8b7570002, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:55,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:55,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41089-0x1011db8b7570003, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:55,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:55,861 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-09T03:51:55,864 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6ee74a15f3e3,36333,1731124313807 2024-11-09T03:51:55,873 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-09T03:51:55,899 INFO [RS:0;6ee74a15f3e3:36499 {}] regionserver.HRegionServer(746): ClusterId : 29e002d1-a5b1-412f-beee-774332e96567 2024-11-09T03:51:55,899 INFO [RS:2;6ee74a15f3e3:41089 {}] regionserver.HRegionServer(746): ClusterId : 29e002d1-a5b1-412f-beee-774332e96567 2024-11-09T03:51:55,899 INFO [RS:1;6ee74a15f3e3:37985 {}] regionserver.HRegionServer(746): ClusterId : 29e002d1-a5b1-412f-beee-774332e96567 2024-11-09T03:51:55,901 DEBUG [RS:0;6ee74a15f3e3:36499 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T03:51:55,901 DEBUG [RS:1;6ee74a15f3e3:37985 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T03:51:55,901 DEBUG 
[RS:2;6ee74a15f3e3:41089 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T03:51:55,915 DEBUG [RS:0;6ee74a15f3e3:36499 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T03:51:55,915 DEBUG [RS:1;6ee74a15f3e3:37985 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T03:51:55,915 DEBUG [RS:2;6ee74a15f3e3:41089 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T03:51:55,915 DEBUG [RS:0;6ee74a15f3e3:36499 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T03:51:55,915 DEBUG [RS:1;6ee74a15f3e3:37985 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T03:51:55,915 DEBUG [RS:2;6ee74a15f3e3:41089 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T03:51:55,935 DEBUG [RS:0;6ee74a15f3e3:36499 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T03:51:55,935 DEBUG [RS:2;6ee74a15f3e3:41089 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T03:51:55,935 DEBUG [RS:1;6ee74a15f3e3:37985 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T03:51:55,936 DEBUG [RS:2;6ee74a15f3e3:41089 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a02c8e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ee74a15f3e3/172.17.0.2:0 2024-11-09T03:51:55,936 DEBUG [RS:0;6ee74a15f3e3:36499 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13aaf12d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ee74a15f3e3/172.17.0.2:0 2024-11-09T03:51:55,937 DEBUG [RS:1;6ee74a15f3e3:37985 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@714f9b8b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ee74a15f3e3/172.17.0.2:0 2024-11-09T03:51:55,945 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-09T03:51:55,950 DEBUG [RS:0;6ee74a15f3e3:36499 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6ee74a15f3e3:36499 2024-11-09T03:51:55,952 INFO [RS:0;6ee74a15f3e3:36499 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T03:51:55,953 INFO [RS:0;6ee74a15f3e3:36499 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T03:51:55,953 DEBUG [RS:0;6ee74a15f3e3:36499 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-09T03:51:55,954 DEBUG [RS:1;6ee74a15f3e3:37985 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;6ee74a15f3e3:37985 2024-11-09T03:51:55,954 DEBUG [RS:2;6ee74a15f3e3:41089 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;6ee74a15f3e3:41089 2024-11-09T03:51:55,955 INFO [RS:1;6ee74a15f3e3:37985 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T03:51:55,955 INFO [RS:2;6ee74a15f3e3:41089 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T03:51:55,955 INFO [RS:1;6ee74a15f3e3:37985 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T03:51:55,955 INFO [RS:2;6ee74a15f3e3:41089 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T03:51:55,955 DEBUG [RS:1;6ee74a15f3e3:37985 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T03:51:55,955 DEBUG [RS:2;6ee74a15f3e3:41089 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T03:51:55,955 INFO [RS:0;6ee74a15f3e3:36499 {}] regionserver.HRegionServer(2659): reportForDuty to master=6ee74a15f3e3,36333,1731124313807 with port=36499, startcode=1731124314714 2024-11-09T03:51:55,956 INFO [RS:1;6ee74a15f3e3:37985 {}] regionserver.HRegionServer(2659): reportForDuty to master=6ee74a15f3e3,36333,1731124313807 with port=37985, startcode=1731124314816 2024-11-09T03:51:55,956 INFO [RS:2;6ee74a15f3e3:41089 {}] regionserver.HRegionServer(2659): reportForDuty to master=6ee74a15f3e3,36333,1731124313807 with port=41089, startcode=1731124314863 2024-11-09T03:51:55,957 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-09T03:51:55,964 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-09T03:51:55,967 DEBUG [RS:0;6ee74a15f3e3:36499 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T03:51:55,967 DEBUG [RS:2;6ee74a15f3e3:41089 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T03:51:55,967 DEBUG [RS:1;6ee74a15f3e3:37985 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T03:51:55,969 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6ee74a15f3e3,36333,1731124313807 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-09T03:51:55,977 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6ee74a15f3e3:0, corePoolSize=5, maxPoolSize=5 2024-11-09T03:51:55,977 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6ee74a15f3e3:0, corePoolSize=5, maxPoolSize=5 2024-11-09T03:51:55,977 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6ee74a15f3e3:0, corePoolSize=5, maxPoolSize=5 2024-11-09T03:51:55,977 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6ee74a15f3e3:0, corePoolSize=5, maxPoolSize=5 2024-11-09T03:51:55,978 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6ee74a15f3e3:0, corePoolSize=10, maxPoolSize=10 2024-11-09T03:51:55,978 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:55,978 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6ee74a15f3e3:0, corePoolSize=2, maxPoolSize=2 2024-11-09T03:51:55,978 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:55,985 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731124345985 2024-11-09T03:51:55,987 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-09T03:51:55,988 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-09T03:51:55,992 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-09T03:51:55,992 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-09T03:51:55,993 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-09T03:51:55,993 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-09T03:51:55,996 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,001 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T03:51:56,002 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-09T03:51:56,004 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-09T03:51:56,007 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-09T03:51:56,008 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-09T03:51:56,008 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43741, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T03:51:56,008 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36771, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T03:51:56,008 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57063, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T03:51:56,010 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:51:56,010 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-09T03:51:56,012 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-09T03:51:56,013 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-09T03:51:56,015 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6ee74a15f3e3:0:becomeActiveMaster-HFileCleaner.large.0-1731124316014,5,FailOnTimeoutGroup] 2024-11-09T03:51:56,015 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6ee74a15f3e3:0:becomeActiveMaster-HFileCleaner.small.0-1731124316015,5,FailOnTimeoutGroup] 2024-11-09T03:51:56,015 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,015 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-09T03:51:56,016 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,017 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,017 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36333 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-09T03:51:56,023 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-09T03:51:56,023 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:56,024 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36333 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6ee74a15f3e3,41089,1731124314863 2024-11-09T03:51:56,027 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36333 {}] master.ServerManager(517): Registering regionserver=6ee74a15f3e3,41089,1731124314863 2024-11-09T03:51:56,027 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1989308819_22 at /127.0.0.1:52564 [Receiving block BP-866353255-172.17.0.2-1731124308682:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:37567:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52564 dst: /127.0.0.1:37567 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T03:51:56,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37567 is added to blk_-9223372036854775712_1013 (size=1321) 2024-11-09T03:51:56,038 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36333 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6ee74a15f3e3,36499,1731124314714 2024-11-09T03:51:56,038 DEBUG [RS:2;6ee74a15f3e3:41089 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a 2024-11-09T03:51:56,038 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36333 {}] master.ServerManager(517): Registering regionserver=6ee74a15f3e3,36499,1731124314714 2024-11-09T03:51:56,038 DEBUG [RS:2;6ee74a15f3e3:41089 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40345 2024-11-09T03:51:56,038 DEBUG [RS:2;6ee74a15f3e3:41089 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T03:51:56,038 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. 
It's at high risk of losing data. 2024-11-09T03:51:56,040 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-09T03:51:56,041 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a 2024-11-09T03:51:56,044 DEBUG [RS:0;6ee74a15f3e3:36499 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a 2024-11-09T03:51:56,044 DEBUG [RS:0;6ee74a15f3e3:36499 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40345 2024-11-09T03:51:56,044 DEBUG [RS:0;6ee74a15f3e3:36499 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T03:51:56,049 DEBUG [RS:1;6ee74a15f3e3:37985 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-09T03:51:56,049 WARN [RS:1;6ee74a15f3e3:37985 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-09T03:51:56,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T03:51:56,052 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-09T03:51:56,052 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:56,057 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1989308819_22 at /127.0.0.1:45038 [Receiving block BP-866353255-172.17.0.2-1731124308682:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:43673:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45038 dst: /127.0.0.1:43673 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T03:51:56,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43673 is added to blk_-9223372036854775696_1015 (size=32) 2024-11-09T03:51:56,068 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T03:51:56,069 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T03:51:56,071 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T03:51:56,074 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T03:51:56,074 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:51:56,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T03:51:56,076 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T03:51:56,078 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T03:51:56,078 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:51:56,079 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T03:51:56,079 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T03:51:56,082 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T03:51:56,082 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:51:56,083 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T03:51:56,083 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T03:51:56,086 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T03:51:56,086 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:51:56,087 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T03:51:56,087 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T03:51:56,088 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740 2024-11-09T03:51:56,089 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740 2024-11-09T03:51:56,091 DEBUG [RS:2;6ee74a15f3e3:41089 {}] zookeeper.ZKUtil(111): 
regionserver:41089-0x1011db8b7570003, quorum=127.0.0.1:49864, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6ee74a15f3e3,41089,1731124314863 2024-11-09T03:51:56,091 WARN [RS:2;6ee74a15f3e3:41089 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-09T03:51:56,091 INFO [RS:2;6ee74a15f3e3:41089 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T03:51:56,091 DEBUG [RS:0;6ee74a15f3e3:36499 {}] zookeeper.ZKUtil(111): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6ee74a15f3e3,36499,1731124314714 2024-11-09T03:51:56,091 WARN [RS:0;6ee74a15f3e3:36499 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-09T03:51:56,092 DEBUG [RS:2;6ee74a15f3e3:41089 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/WALs/6ee74a15f3e3,41089,1731124314863 2024-11-09T03:51:56,092 INFO [RS:0;6ee74a15f3e3:36499 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T03:51:56,092 DEBUG [RS:0;6ee74a15f3e3:36499 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/WALs/6ee74a15f3e3,36499,1731124314714 2024-11-09T03:51:56,093 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T03:51:56,093 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6ee74a15f3e3,41089,1731124314863] 2024-11-09T03:51:56,093 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T03:51:56,093 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6ee74a15f3e3,36499,1731124314714] 2024-11-09T03:51:56,094 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-09T03:51:56,097 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T03:51:56,109 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T03:51:56,110 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62243306, jitterRate=-0.07250246405601501}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T03:51:56,115 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731124316069Initializing all the Stores at 1731124316071 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731124316071Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731124316071Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731124316071Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731124316071Cleaning up temporary data from old regions at 1731124316093 (+22 ms)Region opened successfully at 1731124316114 (+21 ms) 2024-11-09T03:51:56,115 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T03:51:56,115 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T03:51:56,115 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T03:51:56,115 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T03:51:56,115 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T03:51:56,116 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T03:51:56,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731124316115Disabling compacts and flushes for region at 1731124316115Disabling writes for close at 1731124316115Writing 
region close event to WAL at 1731124316116 (+1 ms)Closed at 1731124316116 2024-11-09T03:51:56,120 INFO [RS:2;6ee74a15f3e3:41089 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T03:51:56,120 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T03:51:56,120 INFO [RS:0;6ee74a15f3e3:36499 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T03:51:56,120 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-09T03:51:56,128 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-09T03:51:56,135 INFO [RS:0;6ee74a15f3e3:36499 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T03:51:56,136 INFO [RS:2;6ee74a15f3e3:41089 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T03:51:56,137 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T03:51:56,141 INFO [RS:2;6ee74a15f3e3:41089 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T03:51:56,142 INFO [RS:2;6ee74a15f3e3:41089 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,143 INFO [RS:2;6ee74a15f3e3:41089 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T03:51:56,143 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-09T03:51:56,144 INFO [RS:0;6ee74a15f3e3:36499 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T03:51:56,145 INFO [RS:0;6ee74a15f3e3:36499 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-09T03:51:56,147 INFO [RS:0;6ee74a15f3e3:36499 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T03:51:56,150 INFO [RS:1;6ee74a15f3e3:37985 {}] regionserver.HRegionServer(2659): reportForDuty to master=6ee74a15f3e3,36333,1731124313807 with port=37985, startcode=1731124314816 2024-11-09T03:51:56,150 INFO [RS:0;6ee74a15f3e3:36499 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T03:51:56,150 INFO [RS:2;6ee74a15f3e3:41089 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T03:51:56,151 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36333 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6ee74a15f3e3,37985,1731124314816 2024-11-09T03:51:56,152 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36333 {}] master.ServerManager(517): Registering regionserver=6ee74a15f3e3,37985,1731124314816 2024-11-09T03:51:56,152 INFO [RS:0;6ee74a15f3e3:36499 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,152 INFO [RS:2;6ee74a15f3e3:41089 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,152 DEBUG [RS:2;6ee74a15f3e3:41089 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,152 DEBUG [RS:0;6ee74a15f3e3:36499 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,152 DEBUG [RS:2;6ee74a15f3e3:41089 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,152 DEBUG [RS:0;6ee74a15f3e3:36499 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,153 DEBUG [RS:2;6ee74a15f3e3:41089 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,153 DEBUG [RS:0;6ee74a15f3e3:36499 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,153 DEBUG [RS:2;6ee74a15f3e3:41089 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,153 DEBUG [RS:0;6ee74a15f3e3:36499 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,153 DEBUG [RS:2;6ee74a15f3e3:41089 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,153 DEBUG [RS:0;6ee74a15f3e3:36499 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,153 DEBUG [RS:2;6ee74a15f3e3:41089 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6ee74a15f3e3:0, corePoolSize=2, maxPoolSize=2 2024-11-09T03:51:56,153 DEBUG [RS:0;6ee74a15f3e3:36499 {}] 
executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6ee74a15f3e3:0, corePoolSize=2, maxPoolSize=2 2024-11-09T03:51:56,153 DEBUG [RS:2;6ee74a15f3e3:41089 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,153 DEBUG [RS:0;6ee74a15f3e3:36499 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,153 DEBUG [RS:2;6ee74a15f3e3:41089 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,153 DEBUG [RS:0;6ee74a15f3e3:36499 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,153 DEBUG [RS:2;6ee74a15f3e3:41089 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,153 DEBUG [RS:0;6ee74a15f3e3:36499 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,154 DEBUG [RS:2;6ee74a15f3e3:41089 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,154 DEBUG [RS:0;6ee74a15f3e3:36499 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,154 DEBUG [RS:2;6ee74a15f3e3:41089 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,154 DEBUG [RS:0;6ee74a15f3e3:36499 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,154 DEBUG [RS:0;6ee74a15f3e3:36499 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,154 DEBUG [RS:2;6ee74a15f3e3:41089 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,154 DEBUG [RS:0;6ee74a15f3e3:36499 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6ee74a15f3e3:0, corePoolSize=3, maxPoolSize=3 2024-11-09T03:51:56,154 DEBUG [RS:2;6ee74a15f3e3:41089 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6ee74a15f3e3:0, corePoolSize=3, maxPoolSize=3 2024-11-09T03:51:56,154 DEBUG [RS:0;6ee74a15f3e3:36499 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0, corePoolSize=3, maxPoolSize=3 2024-11-09T03:51:56,154 DEBUG [RS:2;6ee74a15f3e3:41089 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0, corePoolSize=3, maxPoolSize=3 2024-11-09T03:51:56,155 DEBUG [RS:1;6ee74a15f3e3:37985 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a 2024-11-09T03:51:56,155 DEBUG [RS:1;6ee74a15f3e3:37985 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40345 2024-11-09T03:51:56,155 DEBUG [RS:1;6ee74a15f3e3:37985 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T03:51:56,155 INFO [RS:2;6ee74a15f3e3:41089 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,155 INFO [RS:2;6ee74a15f3e3:41089 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,156 INFO [RS:2;6ee74a15f3e3:41089 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,156 INFO [RS:2;6ee74a15f3e3:41089 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,156 INFO [RS:2;6ee74a15f3e3:41089 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,156 INFO [RS:2;6ee74a15f3e3:41089 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,41089,1731124314863-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T03:51:56,157 INFO [RS:0;6ee74a15f3e3:36499 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,158 INFO [RS:0;6ee74a15f3e3:36499 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,158 INFO [RS:0;6ee74a15f3e3:36499 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,158 INFO [RS:0;6ee74a15f3e3:36499 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,158 INFO [RS:0;6ee74a15f3e3:36499 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,158 INFO [RS:0;6ee74a15f3e3:36499 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,36499,1731124314714-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T03:51:56,175 INFO [RS:2;6ee74a15f3e3:41089 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T03:51:56,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T03:51:56,177 DEBUG [RS:1;6ee74a15f3e3:37985 {}] zookeeper.ZKUtil(111): regionserver:37985-0x1011db8b7570002, quorum=127.0.0.1:49864, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6ee74a15f3e3,37985,1731124314816 2024-11-09T03:51:56,177 WARN [RS:1;6ee74a15f3e3:37985 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-09T03:51:56,177 INFO [RS:2;6ee74a15f3e3:41089 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,41089,1731124314863-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-09T03:51:56,177 INFO [RS:1;6ee74a15f3e3:37985 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T03:51:56,177 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6ee74a15f3e3,37985,1731124314816] 2024-11-09T03:51:56,177 DEBUG [RS:1;6ee74a15f3e3:37985 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/WALs/6ee74a15f3e3,37985,1731124314816 2024-11-09T03:51:56,177 INFO [RS:2;6ee74a15f3e3:41089 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,177 INFO [RS:2;6ee74a15f3e3:41089 {}] regionserver.Replication(171): 6ee74a15f3e3,41089,1731124314863 started 2024-11-09T03:51:56,178 INFO [RS:0;6ee74a15f3e3:36499 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T03:51:56,178 INFO [RS:0;6ee74a15f3e3:36499 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,36499,1731124314714-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,178 INFO [RS:0;6ee74a15f3e3:36499 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,178 INFO [RS:0;6ee74a15f3e3:36499 {}] regionserver.Replication(171): 6ee74a15f3e3,36499,1731124314714 started 2024-11-09T03:51:56,182 INFO [RS:1;6ee74a15f3e3:37985 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T03:51:56,186 INFO [RS:1;6ee74a15f3e3:37985 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T03:51:56,187 INFO [RS:1;6ee74a15f3e3:37985 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T03:51:56,187 INFO [RS:1;6ee74a15f3e3:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,187 INFO [RS:1;6ee74a15f3e3:37985 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T03:51:56,189 INFO [RS:1;6ee74a15f3e3:37985 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T03:51:56,189 INFO [RS:1;6ee74a15f3e3:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-09T03:51:56,189 DEBUG [RS:1;6ee74a15f3e3:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,190 DEBUG [RS:1;6ee74a15f3e3:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,190 DEBUG [RS:1;6ee74a15f3e3:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,190 DEBUG [RS:1;6ee74a15f3e3:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,190 DEBUG [RS:1;6ee74a15f3e3:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,190 DEBUG [RS:1;6ee74a15f3e3:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6ee74a15f3e3:0, corePoolSize=2, maxPoolSize=2 2024-11-09T03:51:56,190 DEBUG [RS:1;6ee74a15f3e3:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,190 DEBUG [RS:1;6ee74a15f3e3:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,190 DEBUG [RS:1;6ee74a15f3e3:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,191 DEBUG [RS:1;6ee74a15f3e3:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,191 DEBUG [RS:1;6ee74a15f3e3:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,191 DEBUG [RS:1;6ee74a15f3e3:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:51:56,191 DEBUG [RS:1;6ee74a15f3e3:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6ee74a15f3e3:0, corePoolSize=3, maxPoolSize=3 2024-11-09T03:51:56,191 DEBUG [RS:1;6ee74a15f3e3:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0, corePoolSize=3, maxPoolSize=3 2024-11-09T03:51:56,192 INFO [RS:1;6ee74a15f3e3:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,192 INFO [RS:1;6ee74a15f3e3:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,192 INFO [RS:1;6ee74a15f3e3:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,192 INFO [RS:1;6ee74a15f3e3:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-09T03:51:56,192 INFO [RS:1;6ee74a15f3e3:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,192 INFO [RS:1;6ee74a15f3e3:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,37985,1731124314816-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T03:51:56,199 INFO [RS:2;6ee74a15f3e3:41089 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,199 INFO [RS:0;6ee74a15f3e3:36499 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,199 INFO [RS:2;6ee74a15f3e3:41089 {}] regionserver.HRegionServer(1482): Serving as 6ee74a15f3e3,41089,1731124314863, RpcServer on 6ee74a15f3e3/172.17.0.2:41089, sessionid=0x1011db8b7570003 2024-11-09T03:51:56,199 INFO [RS:0;6ee74a15f3e3:36499 {}] regionserver.HRegionServer(1482): Serving as 6ee74a15f3e3,36499,1731124314714, RpcServer on 6ee74a15f3e3/172.17.0.2:36499, sessionid=0x1011db8b7570001 2024-11-09T03:51:56,200 DEBUG [RS:2;6ee74a15f3e3:41089 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T03:51:56,200 DEBUG [RS:0;6ee74a15f3e3:36499 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T03:51:56,200 DEBUG [RS:2;6ee74a15f3e3:41089 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6ee74a15f3e3,41089,1731124314863 2024-11-09T03:51:56,200 DEBUG [RS:0;6ee74a15f3e3:36499 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6ee74a15f3e3,36499,1731124314714 2024-11-09T03:51:56,200 DEBUG [RS:2;6ee74a15f3e3:41089 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ee74a15f3e3,41089,1731124314863' 2024-11-09T03:51:56,200 DEBUG [RS:0;6ee74a15f3e3:36499 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ee74a15f3e3,36499,1731124314714' 2024-11-09T03:51:56,200 DEBUG [RS:2;6ee74a15f3e3:41089 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T03:51:56,200 DEBUG [RS:0;6ee74a15f3e3:36499 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T03:51:56,201 DEBUG [RS:2;6ee74a15f3e3:41089 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T03:51:56,201 DEBUG [RS:0;6ee74a15f3e3:36499 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T03:51:56,204 DEBUG [RS:0;6ee74a15f3e3:36499 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T03:51:56,204 DEBUG [RS:0;6ee74a15f3e3:36499 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T03:51:56,204 DEBUG [RS:0;6ee74a15f3e3:36499 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6ee74a15f3e3,36499,1731124314714 2024-11-09T03:51:56,204 DEBUG [RS:0;6ee74a15f3e3:36499 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ee74a15f3e3,36499,1731124314714' 2024-11-09T03:51:56,204 DEBUG [RS:2;6ee74a15f3e3:41089 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc 
started 2024-11-09T03:51:56,204 DEBUG [RS:0;6ee74a15f3e3:36499 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T03:51:56,204 DEBUG [RS:2;6ee74a15f3e3:41089 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T03:51:56,205 DEBUG [RS:2;6ee74a15f3e3:41089 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6ee74a15f3e3,41089,1731124314863 2024-11-09T03:51:56,205 DEBUG [RS:2;6ee74a15f3e3:41089 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ee74a15f3e3,41089,1731124314863' 2024-11-09T03:51:56,205 DEBUG [RS:2;6ee74a15f3e3:41089 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T03:51:56,205 DEBUG [RS:0;6ee74a15f3e3:36499 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T03:51:56,205 DEBUG [RS:2;6ee74a15f3e3:41089 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T03:51:56,206 DEBUG [RS:0;6ee74a15f3e3:36499 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T03:51:56,206 DEBUG [RS:2;6ee74a15f3e3:41089 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T03:51:56,206 INFO [RS:0;6ee74a15f3e3:36499 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T03:51:56,206 INFO [RS:2;6ee74a15f3e3:41089 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T03:51:56,206 INFO [RS:0;6ee74a15f3e3:36499 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T03:51:56,206 INFO [RS:2;6ee74a15f3e3:41089 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T03:51:56,214 INFO [RS:1;6ee74a15f3e3:37985 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T03:51:56,214 INFO [RS:1;6ee74a15f3e3:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,37985,1731124314816-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,214 INFO [RS:1;6ee74a15f3e3:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:56,215 INFO [RS:1;6ee74a15f3e3:37985 {}] regionserver.Replication(171): 6ee74a15f3e3,37985,1731124314816 started 2024-11-09T03:51:56,230 INFO [RS:1;6ee74a15f3e3:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-09T03:51:56,230 INFO [RS:1;6ee74a15f3e3:37985 {}] regionserver.HRegionServer(1482): Serving as 6ee74a15f3e3,37985,1731124314816, RpcServer on 6ee74a15f3e3/172.17.0.2:37985, sessionid=0x1011db8b7570002 2024-11-09T03:51:56,230 DEBUG [RS:1;6ee74a15f3e3:37985 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T03:51:56,230 DEBUG [RS:1;6ee74a15f3e3:37985 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6ee74a15f3e3,37985,1731124314816 2024-11-09T03:51:56,230 DEBUG [RS:1;6ee74a15f3e3:37985 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ee74a15f3e3,37985,1731124314816' 2024-11-09T03:51:56,230 DEBUG [RS:1;6ee74a15f3e3:37985 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T03:51:56,231 DEBUG [RS:1;6ee74a15f3e3:37985 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T03:51:56,232 DEBUG [RS:1;6ee74a15f3e3:37985 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T03:51:56,232 DEBUG [RS:1;6ee74a15f3e3:37985 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T03:51:56,232 DEBUG [RS:1;6ee74a15f3e3:37985 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6ee74a15f3e3,37985,1731124314816 2024-11-09T03:51:56,232 DEBUG [RS:1;6ee74a15f3e3:37985 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ee74a15f3e3,37985,1731124314816' 2024-11-09T03:51:56,232 DEBUG [RS:1;6ee74a15f3e3:37985 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T03:51:56,233 DEBUG [RS:1;6ee74a15f3e3:37985 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T03:51:56,233 DEBUG [RS:1;6ee74a15f3e3:37985 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T03:51:56,233 INFO [RS:1;6ee74a15f3e3:37985 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T03:51:56,233 INFO [RS:1;6ee74a15f3e3:37985 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T03:51:56,295 WARN [6ee74a15f3e3:36333 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
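Each region server above finishes startup by reporting that RPC and space quota support is disabled, which is the default. A minimal sketch, assuming the stock hbase.quota.enabled boolean, of flipping that switch in a programmatic, test-style configuration; a real deployment would normally set the same property in hbase-site.xml instead.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class EnableQuotas {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Left at its default (false), this produces the
        // "Quota support disabled" lines seen above from
        // RegionServerRpcQuotaManager and RegionServerSpaceQuotaManager.
        conf.setBoolean("hbase.quota.enabled", true);
        System.out.println("hbase.quota.enabled = "
            + conf.getBoolean("hbase.quota.enabled", false));
      }
    }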
2024-11-09T03:51:56,314 INFO [RS:0;6ee74a15f3e3:36499 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T03:51:56,314 INFO [RS:2;6ee74a15f3e3:41089 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T03:51:56,318 INFO [RS:0;6ee74a15f3e3:36499 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ee74a15f3e3%2C36499%2C1731124314714, suffix=, logDir=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/WALs/6ee74a15f3e3,36499,1731124314714, archiveDir=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/oldWALs, maxLogs=32 2024-11-09T03:51:56,318 INFO [RS:2;6ee74a15f3e3:41089 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ee74a15f3e3%2C41089%2C1731124314863, suffix=, logDir=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/WALs/6ee74a15f3e3,41089,1731124314863, archiveDir=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/oldWALs, maxLogs=32 2024-11-09T03:51:56,334 INFO [RS:1;6ee74a15f3e3:37985 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T03:51:56,337 INFO [RS:1;6ee74a15f3e3:37985 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ee74a15f3e3%2C37985%2C1731124314816, suffix=, logDir=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/WALs/6ee74a15f3e3,37985,1731124314816, archiveDir=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/oldWALs, maxLogs=32 2024-11-09T03:51:56,342 DEBUG [RS:0;6ee74a15f3e3:36499 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/WALs/6ee74a15f3e3,36499,1731124314714/6ee74a15f3e3%2C36499%2C1731124314714.1731124316322, exclude list is [], retry=0 2024-11-09T03:51:56,342 DEBUG [RS:2;6ee74a15f3e3:41089 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/WALs/6ee74a15f3e3,41089,1731124314863/6ee74a15f3e3%2C41089%2C1731124314863.1731124316322, exclude list is [], retry=0 2024-11-09T03:51:56,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41627,DS-1e46231d-1d87-4366-b275-6227886af66c,DISK] 2024-11-09T03:51:56,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37567,DS-4a47d01f-c5d1-4b79-b26f-bf0d0df6cd70,DISK] 2024-11-09T03:51:56,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41627,DS-1e46231d-1d87-4366-b275-6227886af66c,DISK] 2024-11-09T03:51:56,348 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:43673,DS-e95ebd8a-43f7-4fc3-ac35-36c0df3cb523,DISK] 2024-11-09T03:51:56,348 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37567,DS-4a47d01f-c5d1-4b79-b26f-bf0d0df6cd70,DISK] 2024-11-09T03:51:56,348 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43673,DS-e95ebd8a-43f7-4fc3-ac35-36c0df3cb523,DISK] 2024-11-09T03:51:56,376 DEBUG [RS:1;6ee74a15f3e3:37985 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/WALs/6ee74a15f3e3,37985,1731124314816/6ee74a15f3e3%2C37985%2C1731124314816.1731124316339, exclude list is [], retry=0 2024-11-09T03:51:56,378 INFO [RS:2;6ee74a15f3e3:41089 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/WALs/6ee74a15f3e3,41089,1731124314863/6ee74a15f3e3%2C41089%2C1731124314863.1731124316322 2024-11-09T03:51:56,378 INFO [RS:0;6ee74a15f3e3:36499 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/WALs/6ee74a15f3e3,36499,1731124314714/6ee74a15f3e3%2C36499%2C1731124314714.1731124316322 2024-11-09T03:51:56,379 DEBUG [RS:2;6ee74a15f3e3:41089 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37191:37191),(127.0.0.1/127.0.0.1:42959:42959),(127.0.0.1/127.0.0.1:39759:39759)] 2024-11-09T03:51:56,380 DEBUG [RS:0;6ee74a15f3e3:36499 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42959:42959),(127.0.0.1/127.0.0.1:37191:37191),(127.0.0.1/127.0.0.1:39759:39759)] 2024-11-09T03:51:56,384 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37567,DS-4a47d01f-c5d1-4b79-b26f-bf0d0df6cd70,DISK] 2024-11-09T03:51:56,384 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43673,DS-e95ebd8a-43f7-4fc3-ac35-36c0df3cb523,DISK] 2024-11-09T03:51:56,384 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41627,DS-1e46231d-1d87-4366-b275-6227886af66c,DISK] 2024-11-09T03:51:56,394 INFO [RS:1;6ee74a15f3e3:37985 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/WALs/6ee74a15f3e3,37985,1731124314816/6ee74a15f3e3%2C37985%2C1731124314816.1731124316339 2024-11-09T03:51:56,394 DEBUG [RS:1;6ee74a15f3e3:37985 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39759:39759),(127.0.0.1/127.0.0.1:42959:42959),(127.0.0.1/127.0.0.1:37191:37191)] 2024-11-09T03:51:56,548 DEBUG [6ee74a15f3e3:36333 {}] assignment.AssignmentManager(2464): Processing 
assignQueue; systemServersCount=3, allServersCount=3 2024-11-09T03:51:56,561 DEBUG [6ee74a15f3e3:36333 {}] balancer.BalancerClusterState(204): Hosts are {6ee74a15f3e3=0} racks are {/default-rack=0} 2024-11-09T03:51:56,567 DEBUG [6ee74a15f3e3:36333 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T03:51:56,567 DEBUG [6ee74a15f3e3:36333 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T03:51:56,567 DEBUG [6ee74a15f3e3:36333 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-09T03:51:56,567 DEBUG [6ee74a15f3e3:36333 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T03:51:56,567 DEBUG [6ee74a15f3e3:36333 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T03:51:56,567 DEBUG [6ee74a15f3e3:36333 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-09T03:51:56,567 INFO [6ee74a15f3e3:36333 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T03:51:56,567 INFO [6ee74a15f3e3:36333 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T03:51:56,567 INFO [6ee74a15f3e3:36333 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-09T03:51:56,567 DEBUG [6ee74a15f3e3:36333 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T03:51:56,573 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6ee74a15f3e3,41089,1731124314863 2024-11-09T03:51:56,579 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6ee74a15f3e3,41089,1731124314863, state=OPENING 2024-11-09T03:51:56,629 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-09T03:51:56,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:56,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011db8b7570002, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:56,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:56,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41089-0x1011db8b7570003, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:56,640 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T03:51:56,640 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T03:51:56,640 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T03:51:56,640 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T03:51:56,642 DEBUG 
[PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T03:51:56,643 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6ee74a15f3e3,41089,1731124314863}] 2024-11-09T03:51:56,824 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-09T03:51:56,825 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41817, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-09T03:51:56,836 INFO [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-09T03:51:56,837 INFO [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T03:51:56,837 INFO [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-09T03:51:56,840 INFO [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ee74a15f3e3%2C41089%2C1731124314863.meta, suffix=.meta, logDir=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/WALs/6ee74a15f3e3,41089,1731124314863, archiveDir=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/oldWALs, maxLogs=32 2024-11-09T03:51:56,856 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/WALs/6ee74a15f3e3,41089,1731124314863/6ee74a15f3e3%2C41089%2C1731124314863.meta.1731124316842.meta, exclude list is [], retry=0 2024-11-09T03:51:56,861 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41627,DS-1e46231d-1d87-4366-b275-6227886af66c,DISK] 2024-11-09T03:51:56,861 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37567,DS-4a47d01f-c5d1-4b79-b26f-bf0d0df6cd70,DISK] 2024-11-09T03:51:56,861 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43673,DS-e95ebd8a-43f7-4fc3-ac35-36c0df3cb523,DISK] 2024-11-09T03:51:56,863 INFO [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/WALs/6ee74a15f3e3,41089,1731124314863/6ee74a15f3e3%2C41089%2C1731124314863.meta.1731124316842.meta 2024-11-09T03:51:56,864 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37191:37191),(127.0.0.1/127.0.0.1:42959:42959),(127.0.0.1/127.0.0.1:39759:39759)] 2024-11-09T03:51:56,864 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-09T03:51:56,866 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-09T03:51:56,868 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-09T03:51:56,873 INFO [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-09T03:51:56,877 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-09T03:51:56,877 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T03:51:56,878 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-09T03:51:56,878 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-09T03:51:56,881 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T03:51:56,882 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T03:51:56,883 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:51:56,883 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T03:51:56,884 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T03:51:56,885 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T03:51:56,885 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:51:56,886 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T03:51:56,886 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T03:51:56,888 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T03:51:56,888 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:51:56,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T03:51:56,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T03:51:56,890 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T03:51:56,890 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:51:56,891 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T03:51:56,891 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T03:51:56,892 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740 2024-11-09T03:51:56,894 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740 2024-11-09T03:51:56,897 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T03:51:56,897 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T03:51:56,897 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
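The WAL lines earlier in this sequence show each region server, and then the meta-open handler, instantiating AsyncFSWALProvider and creating AsyncFSWAL writers against a three-datanode pipeline (blocksize=256 MB, rollsize=128 MB). A minimal sketch of selecting that provider through configuration; hbase.wal.provider is the standard key, while reading it back here is purely illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class WalProviderConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "asyncfs" selects AsyncFSWALProvider, the provider instantiated in
        // the log above; "filesystem" would select the classic FSHLog-based one.
        conf.set("hbase.wal.provider", "asyncfs");
        System.out.println("WAL provider: " + conf.get("hbase.wal.provider"));
      }
    }

The 256 MB block size and 128 MB roll size reported above appear to be the usual defaults derived from the HDFS block size rather than values this test sets explicitly.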
2024-11-09T03:51:56,900 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T03:51:56,902 INFO [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62312302, jitterRate=-0.0714743435382843}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T03:51:56,902 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-09T03:51:56,904 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731124316878Writing region info on filesystem at 1731124316878Initializing all the Stores at 1731124316880 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731124316880Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731124316881 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731124316881Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731124316881Cleaning up temporary data from old regions at 1731124316897 (+16 ms)Running coprocessor post-open hooks at 1731124316902 (+5 ms)Region opened successfully at 1731124316904 (+2 ms) 2024-11-09T03:51:56,912 INFO [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731124316812 2024-11-09T03:51:56,922 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-09T03:51:56,922 INFO [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-09T03:51:56,924 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=6ee74a15f3e3,41089,1731124314863 2024-11-09T03:51:56,927 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6ee74a15f3e3,41089,1731124314863, state=OPEN 2024-11-09T03:51:56,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T03:51:56,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T03:51:56,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41089-0x1011db8b7570003, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T03:51:56,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011db8b7570002, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T03:51:56,977 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T03:51:56,977 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T03:51:56,977 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T03:51:56,977 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T03:51:56,978 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6ee74a15f3e3,41089,1731124314863 2024-11-09T03:51:56,988 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-09T03:51:56,988 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6ee74a15f3e3,41089,1731124314863 in 335 msec 2024-11-09T03:51:56,994 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-09T03:51:56,994 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 862 msec 2024-11-09T03:51:56,996 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T03:51:56,996 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-09T03:51:57,013 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T03:51:57,014 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=6ee74a15f3e3,41089,1731124314863, seqNum=-1] 2024-11-09T03:51:57,033 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T03:51:57,036 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42903, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T03:51:57,059 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1570 sec 2024-11-09T03:51:57,060 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731124317059, completionTime=-1 2024-11-09T03:51:57,062 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-09T03:51:57,062 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-09T03:51:57,107 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-09T03:51:57,107 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731124377107 2024-11-09T03:51:57,107 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731124437107 2024-11-09T03:51:57,107 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 45 msec 2024-11-09T03:51:57,109 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-09T03:51:57,120 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,36333,1731124313807-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:57,120 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,36333,1731124313807-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:57,120 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,36333,1731124313807-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:57,122 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6ee74a15f3e3:36333, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:57,122 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-09T03:51:57,123 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-11-09T03:51:57,130 DEBUG [master/6ee74a15f3e3:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-09T03:51:57,158 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.191sec 2024-11-09T03:51:57,160 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-09T03:51:57,162 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-09T03:51:57,163 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-09T03:51:57,163 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-09T03:51:57,164 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-09T03:51:57,164 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,36333,1731124313807-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T03:51:57,165 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,36333,1731124313807-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-09T03:51:57,169 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-09T03:51:57,170 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-09T03:51:57,171 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,36333,1731124313807-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-09T03:51:57,210 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ee44c0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T03:51:57,214 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-09T03:51:57,214 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-09T03:51:57,216 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6ee74a15f3e3,36333,-1 for getting cluster id 2024-11-09T03:51:57,218 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-09T03:51:57,226 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '29e002d1-a5b1-412f-beee-774332e96567' 2024-11-09T03:51:57,228 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-09T03:51:57,228 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "29e002d1-a5b1-412f-beee-774332e96567" 2024-11-09T03:51:57,229 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3662b160, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T03:51:57,229 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6ee74a15f3e3,36333,-1] 2024-11-09T03:51:57,232 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-09T03:51:57,234 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T03:51:57,234 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48216, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-09T03:51:57,237 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f4ebf80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T03:51:57,238 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T03:51:57,246 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6ee74a15f3e3,41089,1731124314863, seqNum=-1] 2024-11-09T03:51:57,246 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T03:51:57,248 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59382, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T03:51:57,266 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=6ee74a15f3e3,36333,1731124313807 2024-11-09T03:51:57,270 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-09T03:51:57,275 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 6ee74a15f3e3,36333,1731124313807 2024-11-09T03:51:57,277 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@22495163 2024-11-09T03:51:57,278 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-09T03:51:57,280 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48220, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-09T03:51:57,286 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36333 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T03:51:57,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36333 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-09T03:51:57,296 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-09T03:51:57,298 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36333 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-09T03:51:57,298 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:51:57,301 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-09T03:51:57,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36333 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T03:51:57,309 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:57,309 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
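The two WARN lines above come from DFSStripedOutputStream: the test data directory is written with the RS-3-2-1024k erasure coding policy, which needs at least five datanodes (three data blocks plus two parity blocks), but this mini cluster runs only three, so the parity blocks at indices 3 and 4 cannot be placed. The log itself suggests running 'hdfs ec -verifyClusterSetup' to confirm the topology. A minimal Java sketch of inspecting the policy on the test directory; the NameNode address and path are copied from the WAL paths above, and EcPolicyCheck is just an illustrative class name.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public final class EcPolicyCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address taken from the hdfs://localhost:40345 paths above.
        DistributedFileSystem dfs = (DistributedFileSystem)
            FileSystem.get(URI.create("hdfs://localhost:40345"), conf);
        Path dir = new Path("/user/jenkins/test-data");
        ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
        // null means no erasure coding policy is set on the path (plain replication).
        System.out.println(dir + " -> "
            + (policy == null ? "replication" : policy.getName()));
      }
    }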
2024-11-09T03:51:57,313 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1989308819_22 at /127.0.0.1:52628 [Receiving block BP-866353255-172.17.0.2-1731124308682:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:37567:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52628 dst: /127.0.0.1:37567
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-09T03:51:57,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37567 is added to blk_-9223372036854775680_1021 (size=392) 2024-11-09T03:51:57,321 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T03:51:57,323 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c87013fad15617104e24835b80a21cbf, NAME => 'TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a 2024-11-09T03:51:57,329 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:57,329 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-09T03:51:57,332 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1989308819_22 at /127.0.0.1:45086 [Receiving block BP-866353255-172.17.0.2-1731124308682:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:43673:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45086 dst: /127.0.0.1:43673
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-09T03:51:57,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43673 is added to blk_-9223372036854775664_1023 (size=51) 2024-11-09T03:51:57,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36333 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T03:51:57,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36333 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T03:51:57,741 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T03:51:57,742 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T03:51:57,742 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing c87013fad15617104e24835b80a21cbf, disabling compactions & flushes 2024-11-09T03:51:57,742 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf. 2024-11-09T03:51:57,743 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf. 2024-11-09T03:51:57,743 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf. 
after waiting 0 ms 2024-11-09T03:51:57,743 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf. 2024-11-09T03:51:57,743 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf. 2024-11-09T03:51:57,743 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for c87013fad15617104e24835b80a21cbf: Waiting for close lock at 1731124317742Disabling compacts and flushes for region at 1731124317742Disabling writes for close at 1731124317743 (+1 ms)Writing region close event to WAL at 1731124317743Closed at 1731124317743 2024-11-09T03:51:57,747 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-09T03:51:57,754 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731124317747"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731124317747"}]},"ts":"1731124317747"} 2024-11-09T03:51:57,760 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-09T03:51:57,762 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-09T03:51:57,764 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731124317762"}]},"ts":"1731124317762"} 2024-11-09T03:51:57,768 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-09T03:51:57,769 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {6ee74a15f3e3=0} racks are {/default-rack=0} 2024-11-09T03:51:57,770 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T03:51:57,770 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T03:51:57,770 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-09T03:51:57,770 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T03:51:57,770 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T03:51:57,771 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-09T03:51:57,771 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T03:51:57,771 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T03:51:57,771 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-09T03:51:57,771 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T03:51:57,773 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, 
region=c87013fad15617104e24835b80a21cbf, ASSIGN}] 2024-11-09T03:51:57,775 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c87013fad15617104e24835b80a21cbf, ASSIGN 2024-11-09T03:51:57,777 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c87013fad15617104e24835b80a21cbf, ASSIGN; state=OFFLINE, location=6ee74a15f3e3,36499,1731124314714; forceNewPlan=false, retain=false 2024-11-09T03:51:57,931 INFO [6ee74a15f3e3:36333 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-09T03:51:57,933 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c87013fad15617104e24835b80a21cbf, regionState=OPENING, regionLocation=6ee74a15f3e3,36499,1731124314714 2024-11-09T03:51:57,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36333 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T03:51:57,938 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c87013fad15617104e24835b80a21cbf, ASSIGN because future has completed 2024-11-09T03:51:57,939 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c87013fad15617104e24835b80a21cbf, server=6ee74a15f3e3,36499,1731124314714}] 2024-11-09T03:51:58,094 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-09T03:51:58,097 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49037, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-09T03:51:58,104 INFO [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf. 
2024-11-09T03:51:58,104 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c87013fad15617104e24835b80a21cbf, NAME => 'TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf.', STARTKEY => '', ENDKEY => ''} 2024-11-09T03:51:58,105 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC c87013fad15617104e24835b80a21cbf 2024-11-09T03:51:58,105 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T03:51:58,105 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c87013fad15617104e24835b80a21cbf 2024-11-09T03:51:58,105 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c87013fad15617104e24835b80a21cbf 2024-11-09T03:51:58,108 INFO [StoreOpener-c87013fad15617104e24835b80a21cbf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c87013fad15617104e24835b80a21cbf 2024-11-09T03:51:58,112 INFO [StoreOpener-c87013fad15617104e24835b80a21cbf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c87013fad15617104e24835b80a21cbf columnFamilyName cf 2024-11-09T03:51:58,112 DEBUG [StoreOpener-c87013fad15617104e24835b80a21cbf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:51:58,114 INFO [StoreOpener-c87013fad15617104e24835b80a21cbf-1 {}] regionserver.HStore(327): Store=c87013fad15617104e24835b80a21cbf/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T03:51:58,114 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c87013fad15617104e24835b80a21cbf 2024-11-09T03:51:58,116 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/default/TestHBaseWalOnEC/c87013fad15617104e24835b80a21cbf 2024-11-09T03:51:58,116 DEBUG 
[RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/default/TestHBaseWalOnEC/c87013fad15617104e24835b80a21cbf 2024-11-09T03:51:58,117 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c87013fad15617104e24835b80a21cbf 2024-11-09T03:51:58,117 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c87013fad15617104e24835b80a21cbf 2024-11-09T03:51:58,119 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c87013fad15617104e24835b80a21cbf 2024-11-09T03:51:58,124 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/default/TestHBaseWalOnEC/c87013fad15617104e24835b80a21cbf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T03:51:58,125 INFO [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c87013fad15617104e24835b80a21cbf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71225707, jitterRate=0.06134574115276337}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-09T03:51:58,125 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c87013fad15617104e24835b80a21cbf 2024-11-09T03:51:58,125 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c87013fad15617104e24835b80a21cbf: Running coprocessor pre-open hook at 1731124318106Writing region info on filesystem at 1731124318106Initializing all the Stores at 1731124318108 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731124318108Cleaning up temporary data from old regions at 1731124318117 (+9 ms)Running coprocessor post-open hooks at 1731124318125 (+8 ms)Region opened successfully at 1731124318125 2024-11-09T03:51:58,127 INFO [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf., pid=6, masterSystemTime=1731124318093 2024-11-09T03:51:58,130 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf. 2024-11-09T03:51:58,131 INFO [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf. 
2024-11-09T03:51:58,132 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c87013fad15617104e24835b80a21cbf, regionState=OPEN, openSeqNum=2, regionLocation=6ee74a15f3e3,36499,1731124314714 2024-11-09T03:51:58,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c87013fad15617104e24835b80a21cbf, server=6ee74a15f3e3,36499,1731124314714 because future has completed 2024-11-09T03:51:58,141 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-09T03:51:58,141 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c87013fad15617104e24835b80a21cbf, server=6ee74a15f3e3,36499,1731124314714 in 198 msec 2024-11-09T03:51:58,145 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-09T03:51:58,145 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c87013fad15617104e24835b80a21cbf, ASSIGN in 369 msec 2024-11-09T03:51:58,147 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-09T03:51:58,147 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731124318147"}]},"ts":"1731124318147"} 2024-11-09T03:51:58,150 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-09T03:51:58,152 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-09T03:51:58,155 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 863 msec 2024-11-09T03:51:58,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36333 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T03:51:58,444 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-09T03:51:58,444 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-09T03:51:58,447 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T03:51:58,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-09T03:51:58,458 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T03:51:58,458 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
2024-11-09T03:51:58,465 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf., hostname=6ee74a15f3e3,36499,1731124314714, seqNum=2] 2024-11-09T03:51:58,466 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T03:51:58,469 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57162, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T03:51:58,477 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36333 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-09T03:51:58,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36333 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-09T03:51:58,485 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-09T03:51:58,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36333 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T03:51:58,487 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-09T03:51:58,489 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-09T03:51:58,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36333 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T03:51:58,656 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36499 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-09T03:51:58,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf. 
2024-11-09T03:51:58,664 INFO [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing c87013fad15617104e24835b80a21cbf 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-09T03:51:58,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41627 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-09T03:51:58,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43673 is added to blk_-9223372036854775725_1010 (size=34) 2024-11-09T03:51:58,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41627 is added to blk_-9223372036854775724_1010 (size=34) 2024-11-09T03:51:58,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/default/TestHBaseWalOnEC/c87013fad15617104e24835b80a21cbf/.tmp/cf/10774c44a19a463ebf1776f329d403e1 is 36, key is row/cf:cq/1731124318469/Put/seqid=0 2024-11-09T03:51:58,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43673 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-09T03:51:58,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37567 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-09T03:51:58,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43673 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-09T03:51:58,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41627 is added to blk_-9223372036854775708_1013 (size=1321) 2024-11-09T03:51:58,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43673 is added to blk_-9223372036854775709_1013 (size=1321) 2024-11-09T03:51:58,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41627 is added to blk_-9223372036854775741_1008 (size=1189) 2024-11-09T03:51:58,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43673 is added to blk_-9223372036854775740_1008 (size=1189) 2024-11-09T03:51:58,733 WARN [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:58,733 WARN [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-09T03:51:58,738 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1220823135_22 at /127.0.0.1:51100 [Receiving block BP-866353255-172.17.0.2-1731124308682:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:41627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51100 dst: /127.0.0.1:41627 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T03:51:58,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41627 is added to blk_-9223372036854775648_1025 (size=4787) 2024-11-09T03:51:58,746 WARN [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T03:51:58,746 INFO [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/default/TestHBaseWalOnEC/c87013fad15617104e24835b80a21cbf/.tmp/cf/10774c44a19a463ebf1776f329d403e1 2024-11-09T03:51:58,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/default/TestHBaseWalOnEC/c87013fad15617104e24835b80a21cbf/.tmp/cf/10774c44a19a463ebf1776f329d403e1 as hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/default/TestHBaseWalOnEC/c87013fad15617104e24835b80a21cbf/cf/10774c44a19a463ebf1776f329d403e1 2024-11-09T03:51:58,800 INFO [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/default/TestHBaseWalOnEC/c87013fad15617104e24835b80a21cbf/cf/10774c44a19a463ebf1776f329d403e1, entries=1, sequenceid=5, filesize=4.7 K 2024-11-09T03:51:58,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36333 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T03:51:58,807 INFO [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for c87013fad15617104e24835b80a21cbf in 145ms, sequenceid=5, compaction requested=false 2024-11-09T03:51:58,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-09T03:51:58,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for c87013fad15617104e24835b80a21cbf: 2024-11-09T03:51:58,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf. 
2024-11-09T03:51:58,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-09T03:51:58,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36333 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-09T03:51:58,818 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-09T03:51:58,818 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 326 msec 2024-11-09T03:51:58,821 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 340 msec 2024-11-09T03:51:59,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36333 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T03:51:59,113 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-09T03:51:59,125 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-09T03:51:59,125 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-09T03:51:59,125 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T03:51:59,129 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T03:51:59,130 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T03:51:59,130 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-09T03:51:59,130 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-09T03:51:59,130 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2104746072, stopped=false 2024-11-09T03:51:59,131 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6ee74a15f3e3,36333,1731124313807 2024-11-09T03:51:59,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T03:51:59,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41089-0x1011db8b7570003, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T03:51:59,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011db8b7570002, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T03:51:59,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T03:51:59,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:59,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41089-0x1011db8b7570003, 
quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:59,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011db8b7570002, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:59,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:59,216 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T03:51:59,217 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37985-0x1011db8b7570002, quorum=127.0.0.1:49864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T03:51:59,217 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-09T03:51:59,218 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T03:51:59,218 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T03:51:59,218 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41089-0x1011db8b7570003, quorum=127.0.0.1:49864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T03:51:59,218 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T03:51:59,218 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T03:51:59,218 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6ee74a15f3e3,36499,1731124314714' ***** 2024-11-09T03:51:59,218 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T03:51:59,218 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6ee74a15f3e3,37985,1731124314816' ***** 2024-11-09T03:51:59,218 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T03:51:59,219 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6ee74a15f3e3,41089,1731124314863' ***** 2024-11-09T03:51:59,219 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T03:51:59,219 INFO [RS:0;6ee74a15f3e3:36499 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T03:51:59,219 INFO [RS:2;6ee74a15f3e3:41089 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T03:51:59,219 INFO [RS:0;6ee74a15f3e3:36499 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T03:51:59,219 INFO [RS:1;6ee74a15f3e3:37985 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T03:51:59,219 INFO [RS:1;6ee74a15f3e3:37985 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-11-09T03:51:59,219 INFO [RS:0;6ee74a15f3e3:36499 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T03:51:59,219 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T03:51:59,219 INFO [RS:1;6ee74a15f3e3:37985 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T03:51:59,219 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T03:51:59,220 INFO [RS:1;6ee74a15f3e3:37985 {}] regionserver.HRegionServer(959): stopping server 6ee74a15f3e3,37985,1731124314816 2024-11-09T03:51:59,220 INFO [RS:0;6ee74a15f3e3:36499 {}] regionserver.HRegionServer(3091): Received CLOSE for c87013fad15617104e24835b80a21cbf 2024-11-09T03:51:59,220 INFO [RS:1;6ee74a15f3e3:37985 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T03:51:59,220 INFO [RS:2;6ee74a15f3e3:41089 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T03:51:59,220 INFO [RS:2;6ee74a15f3e3:41089 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T03:51:59,220 INFO [RS:1;6ee74a15f3e3:37985 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;6ee74a15f3e3:37985. 2024-11-09T03:51:59,220 INFO [RS:2;6ee74a15f3e3:41089 {}] regionserver.HRegionServer(959): stopping server 6ee74a15f3e3,41089,1731124314863 2024-11-09T03:51:59,220 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T03:51:59,220 INFO [RS:2;6ee74a15f3e3:41089 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T03:51:59,220 INFO [RS:2;6ee74a15f3e3:41089 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;6ee74a15f3e3:41089. 
2024-11-09T03:51:59,220 DEBUG [RS:1;6ee74a15f3e3:37985 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T03:51:59,220 DEBUG [RS:1;6ee74a15f3e3:37985 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T03:51:59,220 DEBUG [RS:2;6ee74a15f3e3:41089 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T03:51:59,220 DEBUG [RS:2;6ee74a15f3e3:41089 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T03:51:59,221 INFO [RS:1;6ee74a15f3e3:37985 {}] regionserver.HRegionServer(976): stopping server 6ee74a15f3e3,37985,1731124314816; all regions closed. 2024-11-09T03:51:59,221 INFO [RS:0;6ee74a15f3e3:36499 {}] regionserver.HRegionServer(959): stopping server 6ee74a15f3e3,36499,1731124314714 2024-11-09T03:51:59,221 INFO [RS:2;6ee74a15f3e3:41089 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-09T03:51:59,221 INFO [RS:0;6ee74a15f3e3:36499 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T03:51:59,221 INFO [RS:2;6ee74a15f3e3:41089 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T03:51:59,221 INFO [RS:2;6ee74a15f3e3:41089 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T03:51:59,221 INFO [RS:0;6ee74a15f3e3:36499 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6ee74a15f3e3:36499. 2024-11-09T03:51:59,221 DEBUG [RS:0;6ee74a15f3e3:36499 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T03:51:59,221 INFO [RS:2;6ee74a15f3e3:41089 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-09T03:51:59,221 DEBUG [RS:0;6ee74a15f3e3:36499 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T03:51:59,221 DEBUG [RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c87013fad15617104e24835b80a21cbf, disabling compactions & flushes 2024-11-09T03:51:59,221 INFO [RS:0;6ee74a15f3e3:36499 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-09T03:51:59,221 INFO [RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf. 2024-11-09T03:51:59,221 DEBUG [RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf. 2024-11-09T03:51:59,221 INFO [RS:2;6ee74a15f3e3:41089 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-09T03:51:59,221 DEBUG [RS:0;6ee74a15f3e3:36499 {}] regionserver.HRegionServer(1325): Online Regions={c87013fad15617104e24835b80a21cbf=TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf.} 2024-11-09T03:51:59,221 DEBUG [RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf. 
after waiting 0 ms 2024-11-09T03:51:59,221 DEBUG [RS:2;6ee74a15f3e3:41089 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-09T03:51:59,221 DEBUG [RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf. 2024-11-09T03:51:59,222 DEBUG [RS:0;6ee74a15f3e3:36499 {}] regionserver.HRegionServer(1351): Waiting on c87013fad15617104e24835b80a21cbf 2024-11-09T03:51:59,222 DEBUG [RS:2;6ee74a15f3e3:41089 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-09T03:51:59,222 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T03:51:59,222 INFO [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T03:51:59,222 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T03:51:59,222 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T03:51:59,222 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T03:51:59,223 INFO [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-09T03:51:59,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37567 is added to blk_1073741828_1018 (size=93) 2024-11-09T03:51:59,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43673 is added to blk_1073741828_1018 (size=93) 2024-11-09T03:51:59,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41627 is added to blk_1073741828_1018 (size=93) 2024-11-09T03:51:59,235 DEBUG [RS:1;6ee74a15f3e3:37985 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/oldWALs 2024-11-09T03:51:59,235 INFO [RS:1;6ee74a15f3e3:37985 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 6ee74a15f3e3%2C37985%2C1731124314816:(num 1731124316339) 2024-11-09T03:51:59,235 DEBUG [RS:1;6ee74a15f3e3:37985 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T03:51:59,235 INFO [RS:1;6ee74a15f3e3:37985 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T03:51:59,235 INFO [RS:1;6ee74a15f3e3:37985 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T03:51:59,236 INFO [RS:1;6ee74a15f3e3:37985 {}] hbase.ChoreService(370): Chore service for: regionserver/6ee74a15f3e3:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T03:51:59,236 INFO [RS:1;6ee74a15f3e3:37985 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-09T03:51:59,236 INFO [regionserver/6ee74a15f3e3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T03:51:59,236 INFO [RS:1;6ee74a15f3e3:37985 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T03:51:59,236 INFO [RS:1;6ee74a15f3e3:37985 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T03:51:59,236 INFO [RS:1;6ee74a15f3e3:37985 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T03:51:59,236 INFO [RS:1;6ee74a15f3e3:37985 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37985 2024-11-09T03:51:59,243 DEBUG [RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/default/TestHBaseWalOnEC/c87013fad15617104e24835b80a21cbf/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-09T03:51:59,245 INFO [RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf. 2024-11-09T03:51:59,246 DEBUG [RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c87013fad15617104e24835b80a21cbf: Waiting for close lock at 1731124319221Running coprocessor pre-close hooks at 1731124319221Disabling compacts and flushes for region at 1731124319221Disabling writes for close at 1731124319221Writing region close event to WAL at 1731124319223 (+2 ms)Running coprocessor post-close hooks at 1731124319244 (+21 ms)Closed at 1731124319245 (+1 ms) 2024-11-09T03:51:59,246 DEBUG [RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf. 2024-11-09T03:51:59,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011db8b7570002, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6ee74a15f3e3,37985,1731124314816 2024-11-09T03:51:59,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T03:51:59,249 INFO [RS:1;6ee74a15f3e3:37985 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T03:51:59,259 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740/.tmp/info/977bd5b8c4ad4216b523ab51bd47442d is 153, key is TestHBaseWalOnEC,,1731124317282.c87013fad15617104e24835b80a21cbf./info:regioninfo/1731124318131/Put/seqid=0 2024-11-09T03:51:59,260 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6ee74a15f3e3,37985,1731124314816] 2024-11-09T03:51:59,261 INFO [regionserver/6ee74a15f3e3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T03:51:59,262 WARN [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. 
There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:59,262 INFO [regionserver/6ee74a15f3e3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T03:51:59,262 WARN [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:59,266 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1095514908_22 at /127.0.0.1:52686 [Receiving block BP-866353255-172.17.0.2-1731124308682:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:37567:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52686 dst: /127.0.0.1:37567 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T03:51:59,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37567 is added to blk_-9223372036854775632_1027 (size=6637) 2024-11-09T03:51:59,270 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6ee74a15f3e3,37985,1731124314816 already deleted, retry=false 2024-11-09T03:51:59,270 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6ee74a15f3e3,37985,1731124314816 expired; onlineServers=2 2024-11-09T03:51:59,271 WARN [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
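The DFSStripedOutputStream warnings above (and repeated throughout the shutdown below) are expected for this run: RS-3-2-1024k stripes each block group into 3 data blocks plus 2 parity blocks, so a full group needs at least 5 datanodes, while this minicluster runs only 3 (127.0.0.1:37567, 127.0.0.1:43673, 127.0.0.1:41627). The parity blocks at indexes 3 and 4 therefore never get a target node, each flush logs "failed to write 2 blocks", and the DataXceiver "Premature EOF" errors appear to be the datanode side of those short-lived striped writes being closed early. A minimal sketch of the capacity check the warning points at, assuming the namenode at hdfs://localhost:40345 seen in this log; the class name and target path are illustrative, not taken from the test:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative only: report live datanodes and the EC policy in effect, the two
// inputs that decide whether an RS(3,2) block group can be fully placed.
public class EcCapacityCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:40345");   // namenode seen in this log
    try (DistributedFileSystem dfs =
        (DistributedFileSystem) new Path("/").getFileSystem(conf)) {
      int live = dfs.getDataNodeStats().length;           // live datanodes (3 in this run)
      Path dataDir = new Path("/user/jenkins/test-data"); // prefix seen in the log paths
      System.out.println("live datanodes: " + live);
      System.out.println("EC policy: " + dfs.getErasureCodingPolicy(dataDir)); // null = plain replication
      System.out.println("full RS-3-2 group placeable: " + (live >= 3 + 2));
    }
  }
}
```

The command-line equivalent is the `hdfs ec -verifyClusterSetup` call named in the warning itself.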
2024-11-09T03:51:59,271 INFO [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740/.tmp/info/977bd5b8c4ad4216b523ab51bd47442d 2024-11-09T03:51:59,299 INFO [regionserver/6ee74a15f3e3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T03:51:59,301 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740/.tmp/ns/acabde72c39f4f3e83d10c1dd264b3da is 43, key is default/ns:d/1731124317041/Put/seqid=0 2024-11-09T03:51:59,303 WARN [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:59,303 WARN [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:59,307 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1095514908_22 at /127.0.0.1:52696 [Receiving block BP-866353255-172.17.0.2-1731124308682:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:37567:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52696 dst: /127.0.0.1:37567 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-09T03:51:59,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37567 is added to blk_-9223372036854775616_1029 (size=5153) 2024-11-09T03:51:59,311 WARN [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T03:51:59,312 INFO [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740/.tmp/ns/acabde72c39f4f3e83d10c1dd264b3da 2024-11-09T03:51:59,339 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740/.tmp/table/bda2c9a12b7049bb8493702f487430ac is 52, key is TestHBaseWalOnEC/table:state/1731124318147/Put/seqid=0 2024-11-09T03:51:59,341 WARN [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:59,341 WARN [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:59,344 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1095514908_22 at /127.0.0.1:51134 [Receiving block BP-866353255-172.17.0.2-1731124308682:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:41627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51134 dst: /127.0.0.1:41627 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T03:51:59,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41627 is added to blk_-9223372036854775600_1031 (size=5249) 2024-11-09T03:51:59,349 WARN [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T03:51:59,349 INFO [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740/.tmp/table/bda2c9a12b7049bb8493702f487430ac 2024-11-09T03:51:59,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011db8b7570002, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T03:51:59,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011db8b7570002, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T03:51:59,361 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740/.tmp/info/977bd5b8c4ad4216b523ab51bd47442d as hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740/info/977bd5b8c4ad4216b523ab51bd47442d 2024-11-09T03:51:59,361 INFO [RS:1;6ee74a15f3e3:37985 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T03:51:59,361 INFO [RS:1;6ee74a15f3e3:37985 {}] regionserver.HRegionServer(1031): Exiting; stopping=6ee74a15f3e3,37985,1731124314816; zookeeper connection closed. 
2024-11-09T03:51:59,361 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@64dfd800 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@64dfd800 2024-11-09T03:51:59,371 INFO [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740/info/977bd5b8c4ad4216b523ab51bd47442d, entries=10, sequenceid=11, filesize=6.5 K 2024-11-09T03:51:59,373 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740/.tmp/ns/acabde72c39f4f3e83d10c1dd264b3da as hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740/ns/acabde72c39f4f3e83d10c1dd264b3da 2024-11-09T03:51:59,383 INFO [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740/ns/acabde72c39f4f3e83d10c1dd264b3da, entries=2, sequenceid=11, filesize=5.0 K 2024-11-09T03:51:59,384 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740/.tmp/table/bda2c9a12b7049bb8493702f487430ac as hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740/table/bda2c9a12b7049bb8493702f487430ac 2024-11-09T03:51:59,393 INFO [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740/table/bda2c9a12b7049bb8493702f487430ac, entries=2, sequenceid=11, filesize=5.1 K 2024-11-09T03:51:59,395 INFO [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 173ms, sequenceid=11, compaction requested=false 2024-11-09T03:51:59,395 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-09T03:51:59,403 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-09T03:51:59,404 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-09T03:51:59,404 INFO [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T03:51:59,404 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 
1731124319221Running coprocessor pre-close hooks at 1731124319221Disabling compacts and flushes for region at 1731124319221Disabling writes for close at 1731124319222 (+1 ms)Obtaining lock to block concurrent updates at 1731124319223 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731124319223Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731124319224 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731124319225 (+1 ms)Flushing 1588230740/info: creating writer at 1731124319225Flushing 1588230740/info: appending metadata at 1731124319254 (+29 ms)Flushing 1588230740/info: closing flushed file at 1731124319254Flushing 1588230740/ns: creating writer at 1731124319281 (+27 ms)Flushing 1588230740/ns: appending metadata at 1731124319299 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1731124319299Flushing 1588230740/table: creating writer at 1731124319321 (+22 ms)Flushing 1588230740/table: appending metadata at 1731124319337 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731124319337Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@192affaa: reopening flushed file at 1731124319359 (+22 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7ec303ee: reopening flushed file at 1731124319371 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37896837: reopening flushed file at 1731124319383 (+12 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 173ms, sequenceid=11, compaction requested=false at 1731124319395 (+12 ms)Writing region close event to WAL at 1731124319397 (+2 ms)Running coprocessor post-close hooks at 1731124319404 (+7 ms)Closed at 1731124319404 2024-11-09T03:51:59,405 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-09T03:51:59,422 INFO [RS:0;6ee74a15f3e3:36499 {}] regionserver.HRegionServer(976): stopping server 6ee74a15f3e3,36499,1731124314714; all regions closed. 2024-11-09T03:51:59,422 INFO [RS:2;6ee74a15f3e3:41089 {}] regionserver.HRegionServer(976): stopping server 6ee74a15f3e3,41089,1731124314863; all regions closed. 
2024-11-09T03:51:59,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43673 is added to blk_1073741829_1019 (size=2751) 2024-11-09T03:51:59,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41627 is added to blk_1073741827_1017 (size=1298) 2024-11-09T03:51:59,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37567 is added to blk_1073741827_1017 (size=1298) 2024-11-09T03:51:59,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37567 is added to blk_1073741829_1019 (size=2751) 2024-11-09T03:51:59,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43673 is added to blk_1073741827_1017 (size=1298) 2024-11-09T03:51:59,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41627 is added to blk_1073741829_1019 (size=2751) 2024-11-09T03:51:59,431 DEBUG [RS:0;6ee74a15f3e3:36499 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/oldWALs 2024-11-09T03:51:59,431 INFO [RS:0;6ee74a15f3e3:36499 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 6ee74a15f3e3%2C36499%2C1731124314714:(num 1731124316322) 2024-11-09T03:51:59,431 DEBUG [RS:0;6ee74a15f3e3:36499 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T03:51:59,431 INFO [RS:0;6ee74a15f3e3:36499 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T03:51:59,431 INFO [RS:0;6ee74a15f3e3:36499 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T03:51:59,432 DEBUG [RS:2;6ee74a15f3e3:41089 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/oldWALs 2024-11-09T03:51:59,432 INFO [RS:2;6ee74a15f3e3:41089 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 6ee74a15f3e3%2C41089%2C1731124314863.meta:.meta(num 1731124316842) 2024-11-09T03:51:59,432 INFO [RS:0;6ee74a15f3e3:36499 {}] hbase.ChoreService(370): Chore service for: regionserver/6ee74a15f3e3:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-09T03:51:59,432 INFO [RS:0;6ee74a15f3e3:36499 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T03:51:59,432 INFO [RS:0;6ee74a15f3e3:36499 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T03:51:59,432 INFO [regionserver/6ee74a15f3e3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T03:51:59,432 INFO [RS:0;6ee74a15f3e3:36499 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-09T03:51:59,432 INFO [RS:0;6ee74a15f3e3:36499 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T03:51:59,432 INFO [RS:0;6ee74a15f3e3:36499 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36499 2024-11-09T03:51:59,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41627 is added to blk_1073741826_1016 (size=93) 2024-11-09T03:51:59,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37567 is added to blk_1073741826_1016 (size=93) 2024-11-09T03:51:59,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43673 is added to blk_1073741826_1016 (size=93) 2024-11-09T03:51:59,438 DEBUG [RS:2;6ee74a15f3e3:41089 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/oldWALs 2024-11-09T03:51:59,438 INFO [RS:2;6ee74a15f3e3:41089 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 6ee74a15f3e3%2C41089%2C1731124314863:(num 1731124316322) 2024-11-09T03:51:59,438 DEBUG [RS:2;6ee74a15f3e3:41089 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T03:51:59,438 INFO [RS:2;6ee74a15f3e3:41089 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T03:51:59,438 INFO [RS:2;6ee74a15f3e3:41089 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T03:51:59,438 INFO [RS:2;6ee74a15f3e3:41089 {}] hbase.ChoreService(370): Chore service for: regionserver/6ee74a15f3e3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T03:51:59,439 INFO [RS:2;6ee74a15f3e3:41089 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T03:51:59,439 INFO [regionserver/6ee74a15f3e3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-09T03:51:59,439 INFO [RS:2;6ee74a15f3e3:41089 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41089 2024-11-09T03:51:59,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6ee74a15f3e3,36499,1731124314714 2024-11-09T03:51:59,439 INFO [RS:0;6ee74a15f3e3:36499 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T03:51:59,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T03:51:59,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41089-0x1011db8b7570003, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6ee74a15f3e3,41089,1731124314863 2024-11-09T03:51:59,449 INFO [RS:2;6ee74a15f3e3:41089 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T03:51:59,449 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6ee74a15f3e3,41089,1731124314863] 2024-11-09T03:51:59,460 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6ee74a15f3e3,41089,1731124314863 already deleted, retry=false 2024-11-09T03:51:59,460 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6ee74a15f3e3,41089,1731124314863 expired; onlineServers=1 2024-11-09T03:51:59,460 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6ee74a15f3e3,36499,1731124314714] 2024-11-09T03:51:59,470 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6ee74a15f3e3,36499,1731124314714 already deleted, retry=false 2024-11-09T03:51:59,470 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6ee74a15f3e3,36499,1731124314714 expired; onlineServers=0 2024-11-09T03:51:59,471 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6ee74a15f3e3,36333,1731124313807' ***** 2024-11-09T03:51:59,471 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-09T03:51:59,471 INFO [M:0;6ee74a15f3e3:36333 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T03:51:59,471 INFO [M:0;6ee74a15f3e3:36333 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T03:51:59,471 DEBUG [M:0;6ee74a15f3e3:36333 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-09T03:51:59,471 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-09T03:51:59,471 DEBUG [M:0;6ee74a15f3e3:36333 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-09T03:51:59,471 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster-HFileCleaner.small.0-1731124316015 {}] cleaner.HFileCleaner(306): Exit Thread[master/6ee74a15f3e3:0:becomeActiveMaster-HFileCleaner.small.0-1731124316015,5,FailOnTimeoutGroup] 2024-11-09T03:51:59,471 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster-HFileCleaner.large.0-1731124316014 {}] cleaner.HFileCleaner(306): Exit Thread[master/6ee74a15f3e3:0:becomeActiveMaster-HFileCleaner.large.0-1731124316014,5,FailOnTimeoutGroup] 2024-11-09T03:51:59,472 INFO [M:0;6ee74a15f3e3:36333 {}] hbase.ChoreService(370): Chore service for: master/6ee74a15f3e3:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-09T03:51:59,472 INFO [M:0;6ee74a15f3e3:36333 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T03:51:59,472 DEBUG [M:0;6ee74a15f3e3:36333 {}] master.HMaster(1795): Stopping service threads 2024-11-09T03:51:59,472 INFO [M:0;6ee74a15f3e3:36333 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-09T03:51:59,472 INFO [M:0;6ee74a15f3e3:36333 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T03:51:59,473 INFO [M:0;6ee74a15f3e3:36333 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-09T03:51:59,473 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-09T03:51:59,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-09T03:51:59,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:51:59,481 DEBUG [M:0;6ee74a15f3e3:36333 {}] zookeeper.ZKUtil(347): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-09T03:51:59,481 WARN [M:0;6ee74a15f3e3:36333 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-09T03:51:59,482 INFO [M:0;6ee74a15f3e3:36333 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/.lastflushedseqids 2024-11-09T03:51:59,490 WARN [M:0;6ee74a15f3e3:36333 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:59,490 WARN [M:0;6ee74a15f3e3:36333 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-09T03:51:59,492 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1989308819_22 at /127.0.0.1:45154 [Receiving block BP-866353255-172.17.0.2-1731124308682:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:43673:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45154 dst: /127.0.0.1:43673 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T03:51:59,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43673 is added to blk_-9223372036854775584_1033 (size=127) 2024-11-09T03:51:59,497 WARN [M:0;6ee74a15f3e3:36333 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T03:51:59,497 INFO [M:0;6ee74a15f3e3:36333 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-09T03:51:59,497 INFO [M:0;6ee74a15f3e3:36333 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-09T03:51:59,497 DEBUG [M:0;6ee74a15f3e3:36333 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T03:51:59,497 INFO [M:0;6ee74a15f3e3:36333 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T03:51:59,497 DEBUG [M:0;6ee74a15f3e3:36333 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T03:51:59,497 DEBUG [M:0;6ee74a15f3e3:36333 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-09T03:51:59,497 DEBUG [M:0;6ee74a15f3e3:36333 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-09T03:51:59,498 INFO [M:0;6ee74a15f3e3:36333 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-11-09T03:51:59,516 DEBUG [M:0;6ee74a15f3e3:36333 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/38aabea2a90a4f71ac21544b21082aba is 82, key is hbase:meta,,1/info:regioninfo/1731124316924/Put/seqid=0 2024-11-09T03:51:59,518 WARN [M:0;6ee74a15f3e3:36333 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:59,518 WARN [M:0;6ee74a15f3e3:36333 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:59,521 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1989308819_22 at /127.0.0.1:51158 [Receiving block BP-866353255-172.17.0.2-1731124308682:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:41627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51158 dst: /127.0.0.1:41627 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T03:51:59,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41627 is added to blk_-9223372036854775568_1035 (size=5672) 2024-11-09T03:51:59,526 WARN [M:0;6ee74a15f3e3:36333 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T03:51:59,526 INFO [M:0;6ee74a15f3e3:36333 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/38aabea2a90a4f71ac21544b21082aba 2024-11-09T03:51:59,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T03:51:59,550 INFO [RS:0;6ee74a15f3e3:36499 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T03:51:59,550 DEBUG [M:0;6ee74a15f3e3:36333 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e3c685f3af8a486bb57c2cbf1837b522 is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731124318154/Put/seqid=0 2024-11-09T03:51:59,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36499-0x1011db8b7570001, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T03:51:59,550 INFO [RS:0;6ee74a15f3e3:36499 {}] regionserver.HRegionServer(1031): Exiting; stopping=6ee74a15f3e3,36499,1731124314714; zookeeper connection closed. 2024-11-09T03:51:59,550 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@65d74650 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@65d74650 2024-11-09T03:51:59,552 WARN [M:0;6ee74a15f3e3:36333 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:59,552 WARN [M:0;6ee74a15f3e3:36333 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:59,554 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1989308819_22 at /127.0.0.1:45182 [Receiving block BP-866353255-172.17.0.2-1731124308682:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:43673:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45182 dst: /127.0.0.1:43673 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T03:51:59,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43673 is added to blk_-9223372036854775552_1037 (size=6438) 2024-11-09T03:51:59,559 WARN [M:0;6ee74a15f3e3:36333 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T03:51:59,559 INFO [M:0;6ee74a15f3e3:36333 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.13 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e3c685f3af8a486bb57c2cbf1837b522 2024-11-09T03:51:59,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41089-0x1011db8b7570003, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T03:51:59,560 INFO [RS:2;6ee74a15f3e3:41089 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T03:51:59,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41089-0x1011db8b7570003, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T03:51:59,560 INFO [RS:2;6ee74a15f3e3:41089 {}] regionserver.HRegionServer(1031): Exiting; stopping=6ee74a15f3e3,41089,1731124314863; zookeeper connection closed. 2024-11-09T03:51:59,560 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@75c49001 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@75c49001 2024-11-09T03:51:59,561 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-09T03:51:59,583 DEBUG [M:0;6ee74a15f3e3:36333 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/791e23ea333843308fc4f83bb5f1a21c is 69, key is 6ee74a15f3e3,36499,1731124314714/rs:state/1731124316038/Put/seqid=0 2024-11-09T03:51:59,585 WARN [M:0;6ee74a15f3e3:36333 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T03:51:59,585 WARN [M:0;6ee74a15f3e3:36333 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-09T03:51:59,587 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1989308819_22 at /127.0.0.1:51172 [Receiving block BP-866353255-172.17.0.2-1731124308682:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:41627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51172 dst: /127.0.0.1:41627 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T03:51:59,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41627 is added to blk_-9223372036854775536_1039 (size=5294) 2024-11-09T03:51:59,592 WARN [M:0;6ee74a15f3e3:36333 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
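The same pair of parity-allocation warnings recurs for every flush of the master's local store below, because the erasure coding policy is evidently in effect for the test data directory, so every new file written under it gets the striped layout. For reference, a hedged sketch of how a directory is switched onto (or off of) an EC policy through the DistributedFileSystem API; the paths are illustrative and this is not the actual TestHBaseWalOnEC setup code:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative sketch: new files created under the directory inherit the policy;
// files that already exist keep whatever layout they were written with.
public class EcPolicyToggle {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:40345");  // namenode seen in this log
    Path dir = new Path("/user/jenkins/test-data");      // illustrative target directory
    try (DistributedFileSystem dfs =
        (DistributedFileSystem) dir.getFileSystem(conf)) {
      dfs.enableErasureCodingPolicy("RS-3-2-1024k");     // policy must be enabled cluster-wide first
      dfs.setErasureCodingPolicy(dir, "RS-3-2-1024k");   // stripe new files as 3 data + 2 parity
      System.out.println("now: " + dfs.getErasureCodingPolicy(dir));
      dfs.unsetErasureCodingPolicy(dir);                 // new files go back to plain replication
      System.out.println("after unset: " + dfs.getErasureCodingPolicy(dir));
    }
  }
}
```

The same operations are exposed on the shell as `hdfs ec -enablePolicy`, `hdfs ec -setPolicy` and `hdfs ec -unsetPolicy`.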
2024-11-09T03:51:59,592 INFO [M:0;6ee74a15f3e3:36333 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/791e23ea333843308fc4f83bb5f1a21c 2024-11-09T03:51:59,601 DEBUG [M:0;6ee74a15f3e3:36333 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/38aabea2a90a4f71ac21544b21082aba as hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/38aabea2a90a4f71ac21544b21082aba 2024-11-09T03:51:59,610 INFO [M:0;6ee74a15f3e3:36333 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/38aabea2a90a4f71ac21544b21082aba, entries=8, sequenceid=72, filesize=5.5 K 2024-11-09T03:51:59,612 DEBUG [M:0;6ee74a15f3e3:36333 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e3c685f3af8a486bb57c2cbf1837b522 as hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e3c685f3af8a486bb57c2cbf1837b522 2024-11-09T03:51:59,621 INFO [M:0;6ee74a15f3e3:36333 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e3c685f3af8a486bb57c2cbf1837b522, entries=8, sequenceid=72, filesize=6.3 K 2024-11-09T03:51:59,624 DEBUG [M:0;6ee74a15f3e3:36333 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/791e23ea333843308fc4f83bb5f1a21c as hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/791e23ea333843308fc4f83bb5f1a21c 2024-11-09T03:51:59,633 INFO [M:0;6ee74a15f3e3:36333 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/791e23ea333843308fc4f83bb5f1a21c, entries=3, sequenceid=72, filesize=5.2 K 2024-11-09T03:51:59,634 INFO [M:0;6ee74a15f3e3:36333 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 137ms, sequenceid=72, compaction requested=false 2024-11-09T03:51:59,636 INFO [M:0;6ee74a15f3e3:36333 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-09T03:51:59,636 DEBUG [M:0;6ee74a15f3e3:36333 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731124319497Disabling compacts and flushes for region at 1731124319497Disabling writes for close at 1731124319497Obtaining lock to block concurrent updates at 1731124319498 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731124319498Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27459, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1731124319498Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731124319499 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731124319499Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731124319515 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731124319515Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731124319533 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731124319549 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731124319549Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731124319567 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731124319583 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731124319583Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6842c6ac: reopening flushed file at 1731124319600 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6d75ac02: reopening flushed file at 1731124319611 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@b89a40b: reopening flushed file at 1731124319622 (+11 ms)Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 137ms, sequenceid=72, compaction requested=false at 1731124319634 (+12 ms)Writing region close event to WAL at 1731124319636 (+2 ms)Closed at 1731124319636 2024-11-09T03:51:59,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41627 is added to blk_1073741825_1011 (size=32662) 2024-11-09T03:51:59,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43673 is added to blk_1073741825_1011 (size=32662) 2024-11-09T03:51:59,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37567 is added to blk_1073741825_1011 (size=32662) 2024-11-09T03:51:59,640 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T03:51:59,640 INFO [M:0;6ee74a15f3e3:36333 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-09T03:51:59,641 INFO [M:0;6ee74a15f3e3:36333 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36333 2024-11-09T03:51:59,641 INFO [M:0;6ee74a15f3e3:36333 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T03:51:59,750 INFO [M:0;6ee74a15f3e3:36333 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T03:51:59,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T03:51:59,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36333-0x1011db8b7570000, quorum=127.0.0.1:49864, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T03:51:59,792 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3114ae69{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T03:51:59,797 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c70a874{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T03:51:59,797 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T03:51:59,797 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5822645a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T03:51:59,797 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16cd567f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/hadoop.log.dir/,STOPPED} 2024-11-09T03:51:59,799 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T03:51:59,799 WARN [BP-866353255-172.17.0.2-1731124308682 heartbeating to localhost/127.0.0.1:40345 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T03:51:59,799 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T03:51:59,799 WARN [BP-866353255-172.17.0.2-1731124308682 heartbeating to localhost/127.0.0.1:40345 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-866353255-172.17.0.2-1731124308682 (Datanode Uuid ca727c6b-311d-4478-9efd-8be485229669) service to localhost/127.0.0.1:40345 2024-11-09T03:51:59,801 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/cluster_ef556e58-c003-7843-94e8-d7c058f624bc/data/data5/current/BP-866353255-172.17.0.2-1731124308682 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T03:51:59,801 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/cluster_ef556e58-c003-7843-94e8-d7c058f624bc/data/data6/current/BP-866353255-172.17.0.2-1731124308682 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T03:51:59,801 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T03:51:59,806 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@353955e9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T03:51:59,807 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11738cd8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T03:51:59,807 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T03:51:59,807 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40eb7053{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T03:51:59,807 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@510fec09{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/hadoop.log.dir/,STOPPED} 2024-11-09T03:51:59,808 WARN [BP-866353255-172.17.0.2-1731124308682 heartbeating to localhost/127.0.0.1:40345 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T03:51:59,808 WARN [BP-866353255-172.17.0.2-1731124308682 heartbeating to localhost/127.0.0.1:40345 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-866353255-172.17.0.2-1731124308682 (Datanode Uuid b3abe79e-5dc7-4d98-99aa-eb6621fda9bb) service to localhost/127.0.0.1:40345 2024-11-09T03:51:59,808 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T03:51:59,808 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T03:51:59,809 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/cluster_ef556e58-c003-7843-94e8-d7c058f624bc/data/data3/current/BP-866353255-172.17.0.2-1731124308682 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T03:51:59,809 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/cluster_ef556e58-c003-7843-94e8-d7c058f624bc/data/data4/current/BP-866353255-172.17.0.2-1731124308682 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T03:51:59,809 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T03:51:59,816 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1b97a472{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T03:51:59,817 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3722a29b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T03:51:59,817 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T03:51:59,817 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69893329{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T03:51:59,817 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a5de9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/hadoop.log.dir/,STOPPED} 2024-11-09T03:51:59,818 WARN [BP-866353255-172.17.0.2-1731124308682 heartbeating to localhost/127.0.0.1:40345 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T03:51:59,818 WARN [BP-866353255-172.17.0.2-1731124308682 heartbeating to localhost/127.0.0.1:40345 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-866353255-172.17.0.2-1731124308682 (Datanode Uuid 9c4aa0e5-c185-4e8d-9e7f-791e404231a0) service to localhost/127.0.0.1:40345 2024-11-09T03:51:59,819 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/cluster_ef556e58-c003-7843-94e8-d7c058f624bc/data/data1/current/BP-866353255-172.17.0.2-1731124308682 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T03:51:59,819 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T03:51:59,819 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T03:51:59,819 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/cluster_ef556e58-c003-7843-94e8-d7c058f624bc/data/data2/current/BP-866353255-172.17.0.2-1731124308682 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T03:51:59,819 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T03:51:59,827 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62d6efd9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-09T03:51:59,827 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@353d35a1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T03:51:59,827 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T03:51:59,828 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ce709a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T03:51:59,828 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@760c69c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/hadoop.log.dir/,STOPPED} 2024-11-09T03:51:59,839 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-09T03:51:59,866 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-09T03:51:59,873 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=94 (was 163), OpenFileDescriptor=445 (was 391) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=138 (was 151), ProcessCount=11 (was 11), AvailableMemoryMB=6206 (was 6518) 2024-11-09T03:51:59,878 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=94, OpenFileDescriptor=445, MaxFileDescriptor=1048576, SystemLoadAverage=138, ProcessCount=11, AvailableMemoryMB=6206 2024-11-09T03:51:59,878 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-09T03:51:59,878 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/hadoop.log.dir so I do NOT create it in target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f 2024-11-09T03:51:59,878 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/b51988ca-77ce-2345-b088-f9668ccbdedd/hadoop.tmp.dir so I do NOT create it in target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f 2024-11-09T03:51:59,878 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/cluster_df055b96-c134-1ead-2867-9d7e46247600, deleteOnExit=true 2024-11-09T03:51:59,878 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-09T03:51:59,879 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/test.cache.data in system properties and HBase conf 2024-11-09T03:51:59,879 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/hadoop.tmp.dir in system properties and HBase conf 2024-11-09T03:51:59,879 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/hadoop.log.dir in system properties and HBase conf 2024-11-09T03:51:59,879 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-09T03:51:59,879 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-09T03:51:59,879 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-09T03:51:59,879 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is 
not a DistributedFileSystem. Skipping on block location reordering 2024-11-09T03:51:59,879 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-09T03:51:59,879 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-09T03:51:59,880 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-09T03:51:59,880 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T03:51:59,880 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-09T03:51:59,880 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-09T03:51:59,880 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T03:51:59,880 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T03:51:59,880 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-09T03:51:59,880 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/nfs.dump.dir in system properties and HBase conf 2024-11-09T03:51:59,880 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/java.io.tmpdir in system properties and HBase conf 2024-11-09T03:51:59,880 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T03:51:59,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-09T03:51:59,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-09T03:52:00,413 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T03:52:00,419 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T03:52:00,420 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T03:52:00,420 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T03:52:00,420 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-09T03:52:00,421 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T03:52:00,422 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61c928f1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/hadoop.log.dir/,AVAILABLE} 2024-11-09T03:52:00,422 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70402c4c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T03:52:00,516 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6a71642{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/java.io.tmpdir/jetty-localhost-43395-hadoop-hdfs-3_4_1-tests_jar-_-any-17686853843488804218/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-09T03:52:00,517 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6daae1d7{HTTP/1.1, (http/1.1)}{localhost:43395} 2024-11-09T03:52:00,517 INFO [Time-limited test {}] server.Server(415): Started @13721ms 2024-11-09T03:52:00,860 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T03:52:00,863 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T03:52:00,864 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T03:52:00,864 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T03:52:00,864 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-09T03:52:00,865 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a5de444{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/hadoop.log.dir/,AVAILABLE} 2024-11-09T03:52:00,865 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26256332{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T03:52:00,960 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c99f559{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/java.io.tmpdir/jetty-localhost-43553-hadoop-hdfs-3_4_1-tests_jar-_-any-4311024961541410966/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T03:52:00,961 
INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3beb2b8e{HTTP/1.1, (http/1.1)}{localhost:43553} 2024-11-09T03:52:00,961 INFO [Time-limited test {}] server.Server(415): Started @14165ms 2024-11-09T03:52:00,962 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T03:52:00,997 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T03:52:01,002 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T03:52:01,002 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T03:52:01,003 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T03:52:01,003 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T03:52:01,003 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@119b8466{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/hadoop.log.dir/,AVAILABLE} 2024-11-09T03:52:01,004 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3700b027{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T03:52:01,097 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3ce3bd23{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/java.io.tmpdir/jetty-localhost-41027-hadoop-hdfs-3_4_1-tests_jar-_-any-4191163985014901069/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T03:52:01,098 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@92b1c57{HTTP/1.1, (http/1.1)}{localhost:41027} 2024-11-09T03:52:01,098 INFO [Time-limited test {}] server.Server(415): Started @14303ms 2024-11-09T03:52:01,100 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T03:52:01,132 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T03:52:01,135 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T03:52:01,137 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T03:52:01,137 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T03:52:01,137 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T03:52:01,138 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@457c3b58{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/hadoop.log.dir/,AVAILABLE} 2024-11-09T03:52:01,138 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18afd393{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T03:52:01,231 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@50064390{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/java.io.tmpdir/jetty-localhost-33931-hadoop-hdfs-3_4_1-tests_jar-_-any-11485746145936805367/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T03:52:01,231 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@aa26abb{HTTP/1.1, (http/1.1)}{localhost:33931} 2024-11-09T03:52:01,231 INFO [Time-limited test {}] server.Server(415): Started @14436ms 2024-11-09T03:52:01,233 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T03:52:02,273 WARN [Thread-564 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/cluster_df055b96-c134-1ead-2867-9d7e46247600/data/data1/current/BP-926588790-172.17.0.2-1731124319905/current, will proceed with Du for space computation calculation, 2024-11-09T03:52:02,273 WARN [Thread-565 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/cluster_df055b96-c134-1ead-2867-9d7e46247600/data/data2/current/BP-926588790-172.17.0.2-1731124319905/current, will proceed with Du for space computation calculation, 2024-11-09T03:52:02,289 WARN [Thread-505 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-09T03:52:02,292 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa2f96c7a4d130f38 with lease ID 0x8565bff585664baf: Processing first storage report for DS-6fc3f500-93a4-4f0d-90ce-d59871a815a4 from datanode DatanodeRegistration(127.0.0.1:34661, datanodeUuid=4740fc86-1eb6-4b71-be8d-5081bf4a3117, infoPort=36993, infoSecurePort=0, ipcPort=43779, storageInfo=lv=-57;cid=testClusterID;nsid=249398102;c=1731124319905) 2024-11-09T03:52:02,292 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa2f96c7a4d130f38 with lease ID 0x8565bff585664baf: from storage DS-6fc3f500-93a4-4f0d-90ce-d59871a815a4 node DatanodeRegistration(127.0.0.1:34661, datanodeUuid=4740fc86-1eb6-4b71-be8d-5081bf4a3117, infoPort=36993, infoSecurePort=0, ipcPort=43779, storageInfo=lv=-57;cid=testClusterID;nsid=249398102;c=1731124319905), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-09T03:52:02,292 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa2f96c7a4d130f38 with lease ID 0x8565bff585664baf: Processing first storage report for DS-77910081-d17a-4d55-b267-f4b269d9bfd0 from datanode DatanodeRegistration(127.0.0.1:34661, datanodeUuid=4740fc86-1eb6-4b71-be8d-5081bf4a3117, infoPort=36993, infoSecurePort=0, ipcPort=43779, storageInfo=lv=-57;cid=testClusterID;nsid=249398102;c=1731124319905) 2024-11-09T03:52:02,292 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa2f96c7a4d130f38 with lease ID 0x8565bff585664baf: from storage DS-77910081-d17a-4d55-b267-f4b269d9bfd0 node DatanodeRegistration(127.0.0.1:34661, datanodeUuid=4740fc86-1eb6-4b71-be8d-5081bf4a3117, infoPort=36993, infoSecurePort=0, ipcPort=43779, storageInfo=lv=-57;cid=testClusterID;nsid=249398102;c=1731124319905), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T03:52:02,394 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-09T03:52:02,466 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-09T03:52:02,467 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-09T03:52:02,493 WARN [Thread-577 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/cluster_df055b96-c134-1ead-2867-9d7e46247600/data/data3/current/BP-926588790-172.17.0.2-1731124319905/current, will proceed with Du for space computation calculation, 2024-11-09T03:52:02,493 WARN [Thread-578 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/cluster_df055b96-c134-1ead-2867-9d7e46247600/data/data4/current/BP-926588790-172.17.0.2-1731124319905/current, will proceed with Du for space computation calculation, 2024-11-09T03:52:02,514 WARN [Thread-528 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-09T03:52:02,517 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x47828b5716a24bb4 with lease ID 0x8565bff585664bb0: Processing first storage report for DS-84481e34-7f5c-4fc7-80df-fbfeca32e366 from datanode DatanodeRegistration(127.0.0.1:37363, datanodeUuid=bdaf8bbd-744d-42fc-8a57-0c0603eacbf4, infoPort=37491, infoSecurePort=0, ipcPort=40941, storageInfo=lv=-57;cid=testClusterID;nsid=249398102;c=1731124319905) 2024-11-09T03:52:02,517 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x47828b5716a24bb4 with lease ID 0x8565bff585664bb0: from storage DS-84481e34-7f5c-4fc7-80df-fbfeca32e366 node DatanodeRegistration(127.0.0.1:37363, datanodeUuid=bdaf8bbd-744d-42fc-8a57-0c0603eacbf4, infoPort=37491, infoSecurePort=0, ipcPort=40941, storageInfo=lv=-57;cid=testClusterID;nsid=249398102;c=1731124319905), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T03:52:02,518 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x47828b5716a24bb4 with lease ID 0x8565bff585664bb0: Processing first storage report for DS-09cc2829-2a67-4e90-89ff-4f04a81215eb from datanode DatanodeRegistration(127.0.0.1:37363, datanodeUuid=bdaf8bbd-744d-42fc-8a57-0c0603eacbf4, infoPort=37491, infoSecurePort=0, ipcPort=40941, storageInfo=lv=-57;cid=testClusterID;nsid=249398102;c=1731124319905) 2024-11-09T03:52:02,518 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x47828b5716a24bb4 with lease ID 0x8565bff585664bb0: from storage DS-09cc2829-2a67-4e90-89ff-4f04a81215eb node DatanodeRegistration(127.0.0.1:37363, datanodeUuid=bdaf8bbd-744d-42fc-8a57-0c0603eacbf4, infoPort=37491, infoSecurePort=0, ipcPort=40941, storageInfo=lv=-57;cid=testClusterID;nsid=249398102;c=1731124319905), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T03:52:02,611 WARN [Thread-588 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/cluster_df055b96-c134-1ead-2867-9d7e46247600/data/data5/current/BP-926588790-172.17.0.2-1731124319905/current, will proceed with Du for space computation calculation, 2024-11-09T03:52:02,611 WARN [Thread-589 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/cluster_df055b96-c134-1ead-2867-9d7e46247600/data/data6/current/BP-926588790-172.17.0.2-1731124319905/current, will proceed with Du for space computation calculation, 2024-11-09T03:52:02,629 WARN [Thread-550 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-09T03:52:02,631 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x315cd75a9a3c8f49 with lease ID 0x8565bff585664bb1: Processing first storage report for DS-06cca8c2-2127-4707-b780-e243ed97add3 from datanode DatanodeRegistration(127.0.0.1:34525, datanodeUuid=cbb500c1-020a-4531-a43b-0d0eb62ad732, infoPort=39529, infoSecurePort=0, ipcPort=38105, storageInfo=lv=-57;cid=testClusterID;nsid=249398102;c=1731124319905) 2024-11-09T03:52:02,631 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x315cd75a9a3c8f49 with lease ID 0x8565bff585664bb1: from storage DS-06cca8c2-2127-4707-b780-e243ed97add3 node DatanodeRegistration(127.0.0.1:34525, datanodeUuid=cbb500c1-020a-4531-a43b-0d0eb62ad732, infoPort=39529, infoSecurePort=0, ipcPort=38105, storageInfo=lv=-57;cid=testClusterID;nsid=249398102;c=1731124319905), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T03:52:02,631 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x315cd75a9a3c8f49 with lease ID 0x8565bff585664bb1: Processing first storage report for DS-f82b756c-442a-426d-b922-5427bc7c881a from datanode DatanodeRegistration(127.0.0.1:34525, datanodeUuid=cbb500c1-020a-4531-a43b-0d0eb62ad732, infoPort=39529, infoSecurePort=0, ipcPort=38105, storageInfo=lv=-57;cid=testClusterID;nsid=249398102;c=1731124319905) 2024-11-09T03:52:02,631 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x315cd75a9a3c8f49 with lease ID 0x8565bff585664bb1: from storage DS-f82b756c-442a-426d-b922-5427bc7c881a node DatanodeRegistration(127.0.0.1:34525, datanodeUuid=cbb500c1-020a-4531-a43b-0d0eb62ad732, infoPort=39529, infoSecurePort=0, ipcPort=38105, storageInfo=lv=-57;cid=testClusterID;nsid=249398102;c=1731124319905), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T03:52:02,680 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f 2024-11-09T03:52:02,685 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/cluster_df055b96-c134-1ead-2867-9d7e46247600/zookeeper_0, clientPort=54975, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/cluster_df055b96-c134-1ead-2867-9d7e46247600/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/cluster_df055b96-c134-1ead-2867-9d7e46247600/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-09T03:52:02,686 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54975 2024-11-09T03:52:02,686 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
2024-11-09T03:52:02,688 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:52:02,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741825_1001 (size=7) 2024-11-09T03:52:02,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34661 is added to blk_1073741825_1001 (size=7) 2024-11-09T03:52:02,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741825_1001 (size=7) 2024-11-09T03:52:02,703 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba with version=8 2024-11-09T03:52:02,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40345/user/jenkins/test-data/5a19ef64-66dc-b8ac-c1c2-33a385895d6a/hbase-staging 2024-11-09T03:52:02,705 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6ee74a15f3e3:0 server-side Connection retries=45 2024-11-09T03:52:02,705 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T03:52:02,705 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T03:52:02,705 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T03:52:02,705 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T03:52:02,705 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T03:52:02,705 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-09T03:52:02,706 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T03:52:02,706 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37273 2024-11-09T03:52:02,708 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37273 connecting to ZooKeeper ensemble=127.0.0.1:54975 2024-11-09T03:52:02,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:372730x0, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T03:52:02,765 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37273-0x1011db8dd280000 connected 2024-11-09T03:52:02,851 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to 
namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:52:02,855 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:52:02,859 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T03:52:02,859 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba, hbase.cluster.distributed=false 2024-11-09T03:52:02,862 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T03:52:02,862 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37273 2024-11-09T03:52:02,862 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37273 2024-11-09T03:52:02,863 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37273 2024-11-09T03:52:02,863 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37273 2024-11-09T03:52:02,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37273 2024-11-09T03:52:02,884 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6ee74a15f3e3:0 server-side Connection retries=45 2024-11-09T03:52:02,884 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T03:52:02,885 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T03:52:02,885 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T03:52:02,885 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T03:52:02,885 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T03:52:02,885 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T03:52:02,885 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T03:52:02,886 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44037 
2024-11-09T03:52:02,887 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44037 connecting to ZooKeeper ensemble=127.0.0.1:54975 2024-11-09T03:52:02,888 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:52:02,889 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:52:02,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:440370x0, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T03:52:02,902 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44037-0x1011db8dd280001 connected 2024-11-09T03:52:02,903 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T03:52:02,903 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T03:52:02,904 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T03:52:02,905 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T03:52:02,906 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T03:52:02,907 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44037 2024-11-09T03:52:02,907 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44037 2024-11-09T03:52:02,907 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44037 2024-11-09T03:52:02,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44037 2024-11-09T03:52:02,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44037 2024-11-09T03:52:02,924 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6ee74a15f3e3:0 server-side Connection retries=45 2024-11-09T03:52:02,924 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T03:52:02,924 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T03:52:02,924 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T03:52:02,925 INFO 
[Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T03:52:02,925 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T03:52:02,925 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T03:52:02,925 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T03:52:02,925 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33163 2024-11-09T03:52:02,927 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33163 connecting to ZooKeeper ensemble=127.0.0.1:54975 2024-11-09T03:52:02,927 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:52:02,929 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:52:02,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:331630x0, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T03:52:02,945 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33163-0x1011db8dd280002 connected 2024-11-09T03:52:02,945 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33163-0x1011db8dd280002, quorum=127.0.0.1:54975, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T03:52:02,945 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T03:52:02,946 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T03:52:02,947 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33163-0x1011db8dd280002, quorum=127.0.0.1:54975, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T03:52:02,948 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33163-0x1011db8dd280002, quorum=127.0.0.1:54975, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T03:52:02,949 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33163 2024-11-09T03:52:02,949 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33163 2024-11-09T03:52:02,951 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33163 2024-11-09T03:52:02,952 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33163 2024-11-09T03:52:02,952 DEBUG 
[Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33163 2024-11-09T03:52:02,967 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6ee74a15f3e3:0 server-side Connection retries=45 2024-11-09T03:52:02,967 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T03:52:02,967 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T03:52:02,967 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T03:52:02,967 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T03:52:02,967 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T03:52:02,968 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T03:52:02,968 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T03:52:02,968 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45149 2024-11-09T03:52:02,970 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45149 connecting to ZooKeeper ensemble=127.0.0.1:54975 2024-11-09T03:52:02,970 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:52:02,972 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:52:02,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:451490x0, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T03:52:02,987 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:451490x0, quorum=127.0.0.1:54975, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T03:52:02,987 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45149-0x1011db8dd280003 connected 2024-11-09T03:52:02,987 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T03:52:02,988 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T03:52:02,989 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45149-0x1011db8dd280003, quorum=127.0.0.1:54975, baseZNode=/hbase Set watcher on znode that does not yet exist, 
/hbase/master 2024-11-09T03:52:02,991 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45149-0x1011db8dd280003, quorum=127.0.0.1:54975, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T03:52:02,991 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45149 2024-11-09T03:52:02,992 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45149 2024-11-09T03:52:02,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45149 2024-11-09T03:52:02,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45149 2024-11-09T03:52:02,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45149 2024-11-09T03:52:03,009 DEBUG [M:0;6ee74a15f3e3:37273 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6ee74a15f3e3:37273 2024-11-09T03:52:03,010 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6ee74a15f3e3,37273,1731124322704 2024-11-09T03:52:03,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T03:52:03,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33163-0x1011db8dd280002, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T03:52:03,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45149-0x1011db8dd280003, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T03:52:03,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T03:52:03,018 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6ee74a15f3e3,37273,1731124322704 2024-11-09T03:52:03,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T03:52:03,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45149-0x1011db8dd280003, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T03:52:03,028 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:33163-0x1011db8dd280002, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T03:52:03,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45149-0x1011db8dd280003, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33163-0x1011db8dd280002, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,029 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-09T03:52:03,030 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6ee74a15f3e3,37273,1731124322704 from backup master directory 2024-11-09T03:52:03,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33163-0x1011db8dd280002, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T03:52:03,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T03:52:03,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45149-0x1011db8dd280003, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T03:52:03,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6ee74a15f3e3,37273,1731124322704 2024-11-09T03:52:03,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T03:52:03,039 WARN [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-09T03:52:03,039 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6ee74a15f3e3,37273,1731124322704 2024-11-09T03:52:03,045 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/hbase.id] with ID: c0ff621e-2ce4-4d13-927d-4891fe4d95b1 2024-11-09T03:52:03,045 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/.tmp/hbase.id 2024-11-09T03:52:03,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34661 is added to blk_1073741826_1002 (size=42) 2024-11-09T03:52:03,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741826_1002 (size=42) 2024-11-09T03:52:03,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741826_1002 (size=42) 2024-11-09T03:52:03,055 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/.tmp/hbase.id]:[hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/hbase.id] 2024-11-09T03:52:03,070 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T03:52:03,071 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-09T03:52:03,073 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-09T03:52:03,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33163-0x1011db8dd280002, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45149-0x1011db8dd280003, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741827_1003 (size=196) 2024-11-09T03:52:03,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741827_1003 (size=196) 2024-11-09T03:52:03,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34661 is added to blk_1073741827_1003 (size=196) 2024-11-09T03:52:03,093 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T03:52:03,094 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-09T03:52:03,094 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T03:52:03,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34661 is 
added to blk_1073741828_1004 (size=1189) 2024-11-09T03:52:03,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741828_1004 (size=1189) 2024-11-09T03:52:03,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741828_1004 (size=1189) 2024-11-09T03:52:03,108 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/data/master/store 2024-11-09T03:52:03,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741829_1005 (size=34) 2024-11-09T03:52:03,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34661 is added to blk_1073741829_1005 (size=34) 2024-11-09T03:52:03,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741829_1005 (size=34) 2024-11-09T03:52:03,118 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T03:52:03,119 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T03:52:03,119 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T03:52:03,119 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
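The 'master:store' descriptor dumped above spells out four column families (info, proc, rs, state). The sketch below decodes those attributes into equivalent settings using the public HBase client builder API; it is not how MasterRegion constructs its descriptor internally, and only info and proc are shown (rs and state mirror proc's settings).

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    // 'info': VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks.
    var info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();
    // 'proc' (and likewise 'rs' and 'state'): VERSIONS=1, no encoding, ROW bloom, 64 KB blocks.
    var proc = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(64 * 1024)
        .build();

    TableDescriptor store = TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
    System.out.println(store);
  }
}
```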
2024-11-09T03:52:03,119 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-09T03:52:03,119 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T03:52:03,119 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T03:52:03,119 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731124323119Disabling compacts and flushes for region at 1731124323119Disabling writes for close at 1731124323119Writing region close event to WAL at 1731124323119Closed at 1731124323119 2024-11-09T03:52:03,120 WARN [master/6ee74a15f3e3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/data/master/store/.initializing 2024-11-09T03:52:03,120 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/WALs/6ee74a15f3e3,37273,1731124322704 2024-11-09T03:52:03,124 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ee74a15f3e3%2C37273%2C1731124322704, suffix=, logDir=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/WALs/6ee74a15f3e3,37273,1731124322704, archiveDir=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/oldWALs, maxLogs=10 2024-11-09T03:52:03,124 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ee74a15f3e3%2C37273%2C1731124322704.1731124323124 2024-11-09T03:52:03,134 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/WALs/6ee74a15f3e3,37273,1731124322704/6ee74a15f3e3%2C37273%2C1731124322704.1731124323124 2024-11-09T03:52:03,137 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36993:36993),(127.0.0.1/127.0.0.1:37491:37491),(127.0.0.1/127.0.0.1:39529:39529)] 2024-11-09T03:52:03,138 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-09T03:52:03,139 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T03:52:03,139 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:52:03,139 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:52:03,141 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:52:03,143 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-09T03:52:03,143 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:52:03,144 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T03:52:03,144 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:52:03,146 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-09T03:52:03,146 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:52:03,147 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T03:52:03,147 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:52:03,149 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-09T03:52:03,150 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:52:03,150 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T03:52:03,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:52:03,152 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-09T03:52:03,152 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:52:03,153 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T03:52:03,153 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:52:03,154 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:52:03,155 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:52:03,156 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:52:03,156 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:52:03,157 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T03:52:03,158 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T03:52:03,160 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T03:52:03,161 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64133492, jitterRate=-0.04433649778366089}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T03:52:03,162 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731124323139Initializing all the Stores at 1731124323140 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731124323140Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731124323141 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731124323141Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731124323141Cleaning up temporary data from old regions at 1731124323156 (+15 ms)Region opened successfully at 1731124323162 (+6 ms) 2024-11-09T03:52:03,162 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-09T03:52:03,166 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3de8d591, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ee74a15f3e3/172.17.0.2:0 2024-11-09T03:52:03,168 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-09T03:52:03,168 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-09T03:52:03,168 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-09T03:52:03,168 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-09T03:52:03,169 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-09T03:52:03,170 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-09T03:52:03,170 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-09T03:52:03,173 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
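The two "Opened ...; next sequenceid=2" entries (the master:store one above and the hbase:meta one further down) report a desiredMaxFileSize alongside a jitterRate, and both values are consistent with a 64 MB base (67108864 bytes, presumably this test's max-file-size setting for these regions) scaled by (1 + jitterRate). A quick arithmetic check, with HBase's exact rounding assumed to be close to Math.round:

```java
// Reproduce the desiredMaxFileSize values from the jitterRate printed in the log.
public class SplitPolicyJitterCheck {
  public static void main(String[] args) {
    long base = 64L * 1024 * 1024;                    // 67108864 bytes (assumed base)
    double masterStoreJitter = -0.04433649778366089;  // from the master:store open entry
    double metaJitter = 0.029872342944145203;         // from the hbase:meta open entry
    System.out.println(Math.round(base * (1 + masterStoreJitter))); // 64133492, as logged
    System.out.println(Math.round(base * (1 + metaJitter)));        // 69113563, as logged
  }
}
```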
2024-11-09T03:52:03,173 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-09T03:52:03,186 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-09T03:52:03,186 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-09T03:52:03,187 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-09T03:52:03,196 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-09T03:52:03,197 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-09T03:52:03,198 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-09T03:52:03,207 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-09T03:52:03,208 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-09T03:52:03,217 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-09T03:52:03,220 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-09T03:52:03,228 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-09T03:52:03,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T03:52:03,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33163-0x1011db8dd280002, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T03:52:03,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45149-0x1011db8dd280003, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T03:52:03,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-09T03:52:03,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33163-0x1011db8dd280002, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45149-0x1011db8dd280003, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T03:52:03,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,240 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6ee74a15f3e3,37273,1731124322704, sessionid=0x1011db8dd280000, setting cluster-up flag (Was=false) 2024-11-09T03:52:03,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33163-0x1011db8dd280002, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45149-0x1011db8dd280003, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,291 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-09T03:52:03,293 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6ee74a15f3e3,37273,1731124322704 2024-11-09T03:52:03,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45149-0x1011db8dd280003, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:33163-0x1011db8dd280002, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,344 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-09T03:52:03,345 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6ee74a15f3e3,37273,1731124322704 2024-11-09T03:52:03,347 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-09T03:52:03,349 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-09T03:52:03,350 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-09T03:52:03,350 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
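The "Clearing all znodes /hbase/flush-table-proc/..." and "/hbase/online-snapshot/..." entries remove any leftover children under the procedure coordination znodes before the controllers start. A rough sketch of that cleanup with the plain ZooKeeper client; the quorum and paths are from the log, and version -1 means "match any node version".

```java
import org.apache.zookeeper.ZooKeeper;

// Drop any children left under the flush-table-proc coordination znodes.
public class ClearProcedureZnodes {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:54975", 30_000, event -> { });
    for (String base : new String[] {
        "/hbase/flush-table-proc/acquired",
        "/hbase/flush-table-proc/reached",
        "/hbase/flush-table-proc/abort" }) {
      for (String child : zk.getChildren(base, false)) {
        zk.delete(base + "/" + child, -1);
      }
    }
    zk.close();
  }
}
```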
2024-11-09T03:52:03,350 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6ee74a15f3e3,37273,1731124322704 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-09T03:52:03,352 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6ee74a15f3e3:0, corePoolSize=5, maxPoolSize=5 2024-11-09T03:52:03,352 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6ee74a15f3e3:0, corePoolSize=5, maxPoolSize=5 2024-11-09T03:52:03,352 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6ee74a15f3e3:0, corePoolSize=5, maxPoolSize=5 2024-11-09T03:52:03,352 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6ee74a15f3e3:0, corePoolSize=5, maxPoolSize=5 2024-11-09T03:52:03,352 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6ee74a15f3e3:0, corePoolSize=10, maxPoolSize=10 2024-11-09T03:52:03,352 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,352 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6ee74a15f3e3:0, corePoolSize=2, maxPoolSize=2 2024-11-09T03:52:03,352 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,353 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731124353353 2024-11-09T03:52:03,353 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-09T03:52:03,353 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-09T03:52:03,353 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-09T03:52:03,353 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-09T03:52:03,353 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-09T03:52:03,354 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-09T03:52:03,354 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,354 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-09T03:52:03,354 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-09T03:52:03,354 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-09T03:52:03,355 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T03:52:03,355 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-09T03:52:03,355 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-09T03:52:03,355 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-09T03:52:03,355 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6ee74a15f3e3:0:becomeActiveMaster-HFileCleaner.large.0-1731124323355,5,FailOnTimeoutGroup] 2024-11-09T03:52:03,355 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6ee74a15f3e3:0:becomeActiveMaster-HFileCleaner.small.0-1731124323355,5,FailOnTimeoutGroup] 2024-11-09T03:52:03,355 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,355 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-09T03:52:03,355 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,355 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
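The ScheduledChore entries above register named periodic tasks (LogsCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, and so on). The sketch below is not HBase's ChoreService, only the shape of such a chore expressed with a plain ScheduledExecutorService; the task body is a placeholder.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// A stand-in for "ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS".
public class LogCleanerChoreSketch {
  public static void main(String[] args) {
    ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();
    Runnable logsCleaner =
        () -> System.out.println("scanning oldWALs and deleting expired files");
    // Run immediately, then every 600000 ms, mirroring the logged period.
    chorePool.scheduleAtFixedRate(logsCleaner, 0, 600_000, TimeUnit.MILLISECONDS);
  }
}
```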
2024-11-09T03:52:03,356 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:52:03,356 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-09T03:52:03,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741831_1007 (size=1321) 2024-11-09T03:52:03,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34661 is added to blk_1073741831_1007 (size=1321) 2024-11-09T03:52:03,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741831_1007 (size=1321) 2024-11-09T03:52:03,367 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-09T03:52:03,367 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba 2024-11-09T03:52:03,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741832_1008 (size=32) 2024-11-09T03:52:03,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741832_1008 (size=32) 2024-11-09T03:52:03,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34661 is added to blk_1073741832_1008 (size=32) 2024-11-09T03:52:03,383 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T03:52:03,384 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T03:52:03,386 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T03:52:03,386 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:52:03,387 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T03:52:03,387 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T03:52:03,388 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T03:52:03,388 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:52:03,389 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T03:52:03,389 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T03:52:03,390 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T03:52:03,391 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:52:03,391 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T03:52:03,391 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T03:52:03,393 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T03:52:03,393 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:52:03,393 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T03:52:03,393 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T03:52:03,394 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740 2024-11-09T03:52:03,395 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740 2024-11-09T03:52:03,396 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T03:52:03,396 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T03:52:03,397 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T03:52:03,398 INFO [RS:1;6ee74a15f3e3:33163 {}] regionserver.HRegionServer(746): ClusterId : c0ff621e-2ce4-4d13-927d-4891fe4d95b1 2024-11-09T03:52:03,398 INFO [RS:0;6ee74a15f3e3:44037 {}] regionserver.HRegionServer(746): ClusterId : c0ff621e-2ce4-4d13-927d-4891fe4d95b1 2024-11-09T03:52:03,398 DEBUG [RS:0;6ee74a15f3e3:44037 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T03:52:03,398 DEBUG [RS:1;6ee74a15f3e3:33163 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T03:52:03,398 INFO [RS:2;6ee74a15f3e3:45149 {}] regionserver.HRegionServer(746): ClusterId : c0ff621e-2ce4-4d13-927d-4891fe4d95b1 2024-11-09T03:52:03,398 DEBUG [RS:2;6ee74a15f3e3:45149 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T03:52:03,399 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T03:52:03,402 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T03:52:03,403 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69113563, jitterRate=0.029872342944145203}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T03:52:03,404 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731124323383Initializing all the Stores at 1731124323384 (+1 ms)Instantiating store for column 
family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731124323384Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731124323384Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731124323384Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731124323384Cleaning up temporary data from old regions at 1731124323396 (+12 ms)Region opened successfully at 1731124323404 (+8 ms) 2024-11-09T03:52:03,404 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T03:52:03,404 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T03:52:03,404 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T03:52:03,404 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T03:52:03,404 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T03:52:03,405 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T03:52:03,405 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731124323404Disabling compacts and flushes for region at 1731124323404Disabling writes for close at 1731124323404Writing region close event to WAL at 1731124323405 (+1 ms)Closed at 1731124323405 2024-11-09T03:52:03,407 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T03:52:03,407 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-09T03:52:03,407 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-09T03:52:03,409 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T03:52:03,410 INFO [PEWorker-2 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-09T03:52:03,418 DEBUG [RS:1;6ee74a15f3e3:33163 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T03:52:03,418 DEBUG [RS:0;6ee74a15f3e3:44037 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T03:52:03,418 DEBUG [RS:1;6ee74a15f3e3:33163 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T03:52:03,418 DEBUG [RS:0;6ee74a15f3e3:44037 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T03:52:03,428 DEBUG [RS:2;6ee74a15f3e3:45149 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T03:52:03,428 DEBUG [RS:2;6ee74a15f3e3:45149 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T03:52:03,439 DEBUG [RS:0;6ee74a15f3e3:44037 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T03:52:03,439 DEBUG [RS:1;6ee74a15f3e3:33163 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T03:52:03,439 DEBUG [RS:1;6ee74a15f3e3:33163 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41140356, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ee74a15f3e3/172.17.0.2:0 2024-11-09T03:52:03,439 DEBUG [RS:0;6ee74a15f3e3:44037 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15446f29, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ee74a15f3e3/172.17.0.2:0 2024-11-09T03:52:03,440 DEBUG [RS:2;6ee74a15f3e3:45149 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T03:52:03,440 DEBUG [RS:2;6ee74a15f3e3:45149 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53228ff2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6ee74a15f3e3/172.17.0.2:0 2024-11-09T03:52:03,454 DEBUG [RS:1;6ee74a15f3e3:33163 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;6ee74a15f3e3:33163 2024-11-09T03:52:03,454 DEBUG [RS:0;6ee74a15f3e3:44037 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6ee74a15f3e3:44037 2024-11-09T03:52:03,454 DEBUG [RS:2;6ee74a15f3e3:45149 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;6ee74a15f3e3:45149 2024-11-09T03:52:03,454 INFO [RS:1;6ee74a15f3e3:33163 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T03:52:03,454 INFO [RS:1;6ee74a15f3e3:33163 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T03:52:03,454 INFO [RS:0;6ee74a15f3e3:44037 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 
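The "Installed shutdown hook thread: Shutdownhook:RS:..." entries correspond to named JVM shutdown hooks registered per region server. A minimal stand-in follows; the thread name is copied from the log, and the body is a placeholder for the real hook's clean shutdown of the region server.

```java
// Register a named shutdown hook, as the ShutdownHook entries describe.
public class ShutdownHookSketch {
  public static void main(String[] args) throws InterruptedException {
    Thread hook = new Thread(() -> System.out.println("stopping region server ..."),
        "Shutdownhook:RS:0;6ee74a15f3e3:44037");
    Runtime.getRuntime().addShutdownHook(hook);
    Thread.sleep(1_000);  // the hook runs when the JVM exits after this
  }
}
```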
2024-11-09T03:52:03,454 DEBUG [RS:1;6ee74a15f3e3:33163 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T03:52:03,454 INFO [RS:2;6ee74a15f3e3:45149 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T03:52:03,454 INFO [RS:0;6ee74a15f3e3:44037 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T03:52:03,454 INFO [RS:2;6ee74a15f3e3:45149 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T03:52:03,455 DEBUG [RS:0;6ee74a15f3e3:44037 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T03:52:03,455 DEBUG [RS:2;6ee74a15f3e3:45149 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T03:52:03,455 INFO [RS:1;6ee74a15f3e3:33163 {}] regionserver.HRegionServer(2659): reportForDuty to master=6ee74a15f3e3,37273,1731124322704 with port=33163, startcode=1731124322924 2024-11-09T03:52:03,455 INFO [RS:0;6ee74a15f3e3:44037 {}] regionserver.HRegionServer(2659): reportForDuty to master=6ee74a15f3e3,37273,1731124322704 with port=44037, startcode=1731124322884 2024-11-09T03:52:03,455 INFO [RS:2;6ee74a15f3e3:45149 {}] regionserver.HRegionServer(2659): reportForDuty to master=6ee74a15f3e3,37273,1731124322704 with port=45149, startcode=1731124322967 2024-11-09T03:52:03,456 DEBUG [RS:2;6ee74a15f3e3:45149 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T03:52:03,456 DEBUG [RS:0;6ee74a15f3e3:44037 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T03:52:03,456 DEBUG [RS:1;6ee74a15f3e3:33163 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T03:52:03,458 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45573, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T03:52:03,458 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38721, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T03:52:03,458 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54075, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T03:52:03,458 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37273 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6ee74a15f3e3,44037,1731124322884 2024-11-09T03:52:03,458 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37273 {}] master.ServerManager(517): Registering regionserver=6ee74a15f3e3,44037,1731124322884 2024-11-09T03:52:03,460 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37273 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6ee74a15f3e3,45149,1731124322967 2024-11-09T03:52:03,461 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37273 {}] master.ServerManager(517): Registering regionserver=6ee74a15f3e3,45149,1731124322967 2024-11-09T03:52:03,461 DEBUG [RS:0;6ee74a15f3e3:44037 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba 
2024-11-09T03:52:03,461 DEBUG [RS:0;6ee74a15f3e3:44037 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33683 2024-11-09T03:52:03,461 DEBUG [RS:0;6ee74a15f3e3:44037 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T03:52:03,462 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37273 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6ee74a15f3e3,33163,1731124322924 2024-11-09T03:52:03,463 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37273 {}] master.ServerManager(517): Registering regionserver=6ee74a15f3e3,33163,1731124322924 2024-11-09T03:52:03,463 DEBUG [RS:2;6ee74a15f3e3:45149 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba 2024-11-09T03:52:03,463 DEBUG [RS:2;6ee74a15f3e3:45149 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33683 2024-11-09T03:52:03,463 DEBUG [RS:2;6ee74a15f3e3:45149 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T03:52:03,464 DEBUG [RS:1;6ee74a15f3e3:33163 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba 2024-11-09T03:52:03,465 DEBUG [RS:1;6ee74a15f3e3:33163 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33683 2024-11-09T03:52:03,465 DEBUG [RS:1;6ee74a15f3e3:33163 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T03:52:03,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T03:52:03,512 DEBUG [RS:0;6ee74a15f3e3:44037 {}] zookeeper.ZKUtil(111): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6ee74a15f3e3,44037,1731124322884 2024-11-09T03:52:03,512 WARN [RS:0;6ee74a15f3e3:44037 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-09T03:52:03,513 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6ee74a15f3e3,45149,1731124322967] 2024-11-09T03:52:03,513 INFO [RS:0;6ee74a15f3e3:44037 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T03:52:03,513 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6ee74a15f3e3,44037,1731124322884] 2024-11-09T03:52:03,513 DEBUG [RS:2;6ee74a15f3e3:45149 {}] zookeeper.ZKUtil(111): regionserver:45149-0x1011db8dd280003, quorum=127.0.0.1:54975, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6ee74a15f3e3,45149,1731124322967 2024-11-09T03:52:03,513 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6ee74a15f3e3,33163,1731124322924] 2024-11-09T03:52:03,513 DEBUG [RS:0;6ee74a15f3e3:44037 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/WALs/6ee74a15f3e3,44037,1731124322884 2024-11-09T03:52:03,513 WARN [RS:2;6ee74a15f3e3:45149 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-09T03:52:03,513 DEBUG [RS:1;6ee74a15f3e3:33163 {}] zookeeper.ZKUtil(111): regionserver:33163-0x1011db8dd280002, quorum=127.0.0.1:54975, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6ee74a15f3e3,33163,1731124322924 2024-11-09T03:52:03,513 WARN [RS:1;6ee74a15f3e3:33163 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-09T03:52:03,513 INFO [RS:2;6ee74a15f3e3:45149 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T03:52:03,513 INFO [RS:1;6ee74a15f3e3:33163 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T03:52:03,513 DEBUG [RS:2;6ee74a15f3e3:45149 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/WALs/6ee74a15f3e3,45149,1731124322967 2024-11-09T03:52:03,514 DEBUG [RS:1;6ee74a15f3e3:33163 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/WALs/6ee74a15f3e3,33163,1731124322924 2024-11-09T03:52:03,525 INFO [RS:2;6ee74a15f3e3:45149 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T03:52:03,525 INFO [RS:0;6ee74a15f3e3:44037 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T03:52:03,525 INFO [RS:1;6ee74a15f3e3:33163 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T03:52:03,527 INFO [RS:0;6ee74a15f3e3:44037 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T03:52:03,527 INFO [RS:0;6ee74a15f3e3:44037 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T03:52:03,527 INFO [RS:0;6ee74a15f3e3:44037 {}] hbase.ChoreService(168): Chore ScheduledChore 
name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,527 INFO [RS:0;6ee74a15f3e3:44037 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T03:52:03,528 INFO [RS:0;6ee74a15f3e3:44037 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T03:52:03,529 INFO [RS:0;6ee74a15f3e3:44037 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,529 DEBUG [RS:0;6ee74a15f3e3:44037 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,529 INFO [RS:2;6ee74a15f3e3:45149 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T03:52:03,529 DEBUG [RS:0;6ee74a15f3e3:44037 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,529 DEBUG [RS:0;6ee74a15f3e3:44037 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,529 DEBUG [RS:0;6ee74a15f3e3:44037 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,529 INFO [RS:2;6ee74a15f3e3:45149 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T03:52:03,529 DEBUG [RS:0;6ee74a15f3e3:44037 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,529 INFO [RS:2;6ee74a15f3e3:45149 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-09T03:52:03,529 DEBUG [RS:0;6ee74a15f3e3:44037 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6ee74a15f3e3:0, corePoolSize=2, maxPoolSize=2 2024-11-09T03:52:03,529 DEBUG [RS:0;6ee74a15f3e3:44037 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,530 DEBUG [RS:0;6ee74a15f3e3:44037 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,530 DEBUG [RS:0;6ee74a15f3e3:44037 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,530 DEBUG [RS:0;6ee74a15f3e3:44037 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,530 DEBUG [RS:0;6ee74a15f3e3:44037 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,530 DEBUG [RS:0;6ee74a15f3e3:44037 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,530 DEBUG [RS:0;6ee74a15f3e3:44037 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6ee74a15f3e3:0, corePoolSize=3, maxPoolSize=3 2024-11-09T03:52:03,530 DEBUG [RS:0;6ee74a15f3e3:44037 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0, corePoolSize=3, maxPoolSize=3 2024-11-09T03:52:03,530 INFO [RS:2;6ee74a15f3e3:45149 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T03:52:03,531 INFO [RS:2;6ee74a15f3e3:45149 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T03:52:03,532 INFO [RS:2;6ee74a15f3e3:45149 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,532 DEBUG [RS:2;6ee74a15f3e3:45149 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,532 DEBUG [RS:2;6ee74a15f3e3:45149 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,532 INFO [RS:0;6ee74a15f3e3:44037 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,532 DEBUG [RS:2;6ee74a15f3e3:45149 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,532 INFO [RS:0;6ee74a15f3e3:44037 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,532 INFO [RS:0;6ee74a15f3e3:44037 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-09T03:52:03,532 DEBUG [RS:2;6ee74a15f3e3:45149 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,532 INFO [RS:0;6ee74a15f3e3:44037 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,532 INFO [RS:0;6ee74a15f3e3:44037 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,532 DEBUG [RS:2;6ee74a15f3e3:45149 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,532 INFO [RS:0;6ee74a15f3e3:44037 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,44037,1731124322884-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T03:52:03,532 DEBUG [RS:2;6ee74a15f3e3:45149 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6ee74a15f3e3:0, corePoolSize=2, maxPoolSize=2 2024-11-09T03:52:03,533 DEBUG [RS:2;6ee74a15f3e3:45149 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,533 DEBUG [RS:2;6ee74a15f3e3:45149 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,533 DEBUG [RS:2;6ee74a15f3e3:45149 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,533 DEBUG [RS:2;6ee74a15f3e3:45149 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,533 DEBUG [RS:2;6ee74a15f3e3:45149 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,533 DEBUG [RS:2;6ee74a15f3e3:45149 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,533 DEBUG [RS:2;6ee74a15f3e3:45149 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6ee74a15f3e3:0, corePoolSize=3, maxPoolSize=3 2024-11-09T03:52:03,533 DEBUG [RS:2;6ee74a15f3e3:45149 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0, corePoolSize=3, maxPoolSize=3 2024-11-09T03:52:03,534 INFO [RS:1;6ee74a15f3e3:33163 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T03:52:03,535 INFO [RS:1;6ee74a15f3e3:33163 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T03:52:03,535 INFO [RS:1;6ee74a15f3e3:33163 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,536 INFO [RS:2;6ee74a15f3e3:45149 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-09T03:52:03,536 INFO [RS:2;6ee74a15f3e3:45149 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,536 INFO [RS:2;6ee74a15f3e3:45149 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,536 INFO [RS:2;6ee74a15f3e3:45149 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,536 INFO [RS:2;6ee74a15f3e3:45149 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,537 INFO [RS:2;6ee74a15f3e3:45149 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,45149,1731124322967-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T03:52:03,537 INFO [RS:1;6ee74a15f3e3:33163 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T03:52:03,538 INFO [RS:1;6ee74a15f3e3:33163 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T03:52:03,538 INFO [RS:1;6ee74a15f3e3:33163 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,538 DEBUG [RS:1;6ee74a15f3e3:33163 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,538 DEBUG [RS:1;6ee74a15f3e3:33163 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,538 DEBUG [RS:1;6ee74a15f3e3:33163 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,539 DEBUG [RS:1;6ee74a15f3e3:33163 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,539 DEBUG [RS:1;6ee74a15f3e3:33163 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,539 DEBUG [RS:1;6ee74a15f3e3:33163 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6ee74a15f3e3:0, corePoolSize=2, maxPoolSize=2 2024-11-09T03:52:03,539 DEBUG [RS:1;6ee74a15f3e3:33163 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,539 DEBUG [RS:1;6ee74a15f3e3:33163 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,539 DEBUG [RS:1;6ee74a15f3e3:33163 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,539 DEBUG [RS:1;6ee74a15f3e3:33163 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,539 DEBUG [RS:1;6ee74a15f3e3:33163 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 
2024-11-09T03:52:03,539 DEBUG [RS:1;6ee74a15f3e3:33163 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6ee74a15f3e3:0, corePoolSize=1, maxPoolSize=1 2024-11-09T03:52:03,539 DEBUG [RS:1;6ee74a15f3e3:33163 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6ee74a15f3e3:0, corePoolSize=3, maxPoolSize=3 2024-11-09T03:52:03,539 DEBUG [RS:1;6ee74a15f3e3:33163 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0, corePoolSize=3, maxPoolSize=3 2024-11-09T03:52:03,542 INFO [RS:1;6ee74a15f3e3:33163 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,542 INFO [RS:1;6ee74a15f3e3:33163 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,542 INFO [RS:1;6ee74a15f3e3:33163 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,542 INFO [RS:1;6ee74a15f3e3:33163 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,542 INFO [RS:1;6ee74a15f3e3:33163 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,543 INFO [RS:1;6ee74a15f3e3:33163 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,33163,1731124322924-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T03:52:03,549 INFO [RS:0;6ee74a15f3e3:44037 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T03:52:03,550 INFO [RS:0;6ee74a15f3e3:44037 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,44037,1731124322884-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,550 INFO [RS:0;6ee74a15f3e3:44037 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,550 INFO [RS:0;6ee74a15f3e3:44037 {}] regionserver.Replication(171): 6ee74a15f3e3,44037,1731124322884 started 2024-11-09T03:52:03,551 INFO [RS:2;6ee74a15f3e3:45149 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T03:52:03,551 INFO [RS:2;6ee74a15f3e3:45149 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,45149,1731124322967-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,551 INFO [RS:2;6ee74a15f3e3:45149 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,551 INFO [RS:2;6ee74a15f3e3:45149 {}] regionserver.Replication(171): 6ee74a15f3e3,45149,1731124322967 started 2024-11-09T03:52:03,556 INFO [RS:1;6ee74a15f3e3:33163 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T03:52:03,556 INFO [RS:1;6ee74a15f3e3:33163 {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,33163,1731124322924-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,556 INFO [RS:1;6ee74a15f3e3:33163 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-09T03:52:03,556 INFO [RS:1;6ee74a15f3e3:33163 {}] regionserver.Replication(171): 6ee74a15f3e3,33163,1731124322924 started 2024-11-09T03:52:03,561 WARN [6ee74a15f3e3:37273 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-09T03:52:03,563 INFO [RS:0;6ee74a15f3e3:44037 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:03,563 INFO [RS:0;6ee74a15f3e3:44037 {}] regionserver.HRegionServer(1482): Serving as 6ee74a15f3e3,44037,1731124322884, RpcServer on 6ee74a15f3e3/172.17.0.2:44037, sessionid=0x1011db8dd280001 2024-11-09T03:52:03,563 DEBUG [RS:0;6ee74a15f3e3:44037 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T03:52:03,563 DEBUG [RS:0;6ee74a15f3e3:44037 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6ee74a15f3e3,44037,1731124322884 2024-11-09T03:52:03,563 DEBUG [RS:0;6ee74a15f3e3:44037 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ee74a15f3e3,44037,1731124322884' 2024-11-09T03:52:03,563 DEBUG [RS:0;6ee74a15f3e3:44037 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T03:52:03,564 DEBUG [RS:0;6ee74a15f3e3:44037 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T03:52:03,564 DEBUG [RS:0;6ee74a15f3e3:44037 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T03:52:03,564 DEBUG [RS:0;6ee74a15f3e3:44037 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T03:52:03,564 DEBUG [RS:0;6ee74a15f3e3:44037 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6ee74a15f3e3,44037,1731124322884 2024-11-09T03:52:03,564 DEBUG [RS:0;6ee74a15f3e3:44037 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ee74a15f3e3,44037,1731124322884' 2024-11-09T03:52:03,564 DEBUG [RS:0;6ee74a15f3e3:44037 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T03:52:03,564 INFO [RS:2;6ee74a15f3e3:45149 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-09T03:52:03,565 INFO [RS:2;6ee74a15f3e3:45149 {}] regionserver.HRegionServer(1482): Serving as 6ee74a15f3e3,45149,1731124322967, RpcServer on 6ee74a15f3e3/172.17.0.2:45149, sessionid=0x1011db8dd280003 2024-11-09T03:52:03,565 DEBUG [RS:0;6ee74a15f3e3:44037 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T03:52:03,565 DEBUG [RS:2;6ee74a15f3e3:45149 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T03:52:03,565 DEBUG [RS:2;6ee74a15f3e3:45149 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6ee74a15f3e3,45149,1731124322967 2024-11-09T03:52:03,565 DEBUG [RS:2;6ee74a15f3e3:45149 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ee74a15f3e3,45149,1731124322967' 2024-11-09T03:52:03,565 DEBUG [RS:2;6ee74a15f3e3:45149 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T03:52:03,565 DEBUG [RS:0;6ee74a15f3e3:44037 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T03:52:03,565 INFO [RS:0;6ee74a15f3e3:44037 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T03:52:03,565 INFO [RS:0;6ee74a15f3e3:44037 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T03:52:03,565 DEBUG [RS:2;6ee74a15f3e3:45149 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T03:52:03,565 DEBUG [RS:2;6ee74a15f3e3:45149 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T03:52:03,566 DEBUG [RS:2;6ee74a15f3e3:45149 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T03:52:03,566 DEBUG [RS:2;6ee74a15f3e3:45149 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6ee74a15f3e3,45149,1731124322967 2024-11-09T03:52:03,566 DEBUG [RS:2;6ee74a15f3e3:45149 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ee74a15f3e3,45149,1731124322967' 2024-11-09T03:52:03,566 DEBUG [RS:2;6ee74a15f3e3:45149 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T03:52:03,566 DEBUG [RS:2;6ee74a15f3e3:45149 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T03:52:03,566 DEBUG [RS:2;6ee74a15f3e3:45149 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T03:52:03,566 INFO [RS:2;6ee74a15f3e3:45149 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T03:52:03,567 INFO [RS:2;6ee74a15f3e3:45149 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T03:52:03,569 INFO [RS:1;6ee74a15f3e3:33163 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-09T03:52:03,569 INFO [RS:1;6ee74a15f3e3:33163 {}] regionserver.HRegionServer(1482): Serving as 6ee74a15f3e3,33163,1731124322924, RpcServer on 6ee74a15f3e3/172.17.0.2:33163, sessionid=0x1011db8dd280002 2024-11-09T03:52:03,570 DEBUG [RS:1;6ee74a15f3e3:33163 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T03:52:03,570 DEBUG [RS:1;6ee74a15f3e3:33163 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6ee74a15f3e3,33163,1731124322924 2024-11-09T03:52:03,570 DEBUG [RS:1;6ee74a15f3e3:33163 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ee74a15f3e3,33163,1731124322924' 2024-11-09T03:52:03,570 DEBUG [RS:1;6ee74a15f3e3:33163 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T03:52:03,570 DEBUG [RS:1;6ee74a15f3e3:33163 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T03:52:03,571 DEBUG [RS:1;6ee74a15f3e3:33163 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T03:52:03,571 DEBUG [RS:1;6ee74a15f3e3:33163 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T03:52:03,571 DEBUG [RS:1;6ee74a15f3e3:33163 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6ee74a15f3e3,33163,1731124322924 2024-11-09T03:52:03,571 DEBUG [RS:1;6ee74a15f3e3:33163 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6ee74a15f3e3,33163,1731124322924' 2024-11-09T03:52:03,571 DEBUG [RS:1;6ee74a15f3e3:33163 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T03:52:03,572 DEBUG [RS:1;6ee74a15f3e3:33163 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T03:52:03,572 DEBUG [RS:1;6ee74a15f3e3:33163 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T03:52:03,572 INFO [RS:1;6ee74a15f3e3:33163 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T03:52:03,572 INFO [RS:1;6ee74a15f3e3:33163 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-09T03:52:03,670 INFO [RS:0;6ee74a15f3e3:44037 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ee74a15f3e3%2C44037%2C1731124322884, suffix=, logDir=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/WALs/6ee74a15f3e3,44037,1731124322884, archiveDir=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/oldWALs, maxLogs=32 2024-11-09T03:52:03,672 INFO [RS:0;6ee74a15f3e3:44037 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ee74a15f3e3%2C44037%2C1731124322884.1731124323672 2024-11-09T03:52:03,672 INFO [RS:2;6ee74a15f3e3:45149 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ee74a15f3e3%2C45149%2C1731124322967, suffix=, logDir=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/WALs/6ee74a15f3e3,45149,1731124322967, archiveDir=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/oldWALs, maxLogs=32 2024-11-09T03:52:03,673 INFO [RS:2;6ee74a15f3e3:45149 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ee74a15f3e3%2C45149%2C1731124322967.1731124323673 2024-11-09T03:52:03,675 INFO [RS:1;6ee74a15f3e3:33163 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ee74a15f3e3%2C33163%2C1731124322924, suffix=, logDir=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/WALs/6ee74a15f3e3,33163,1731124322924, archiveDir=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/oldWALs, maxLogs=32 2024-11-09T03:52:03,676 INFO [RS:1;6ee74a15f3e3:33163 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ee74a15f3e3%2C33163%2C1731124322924.1731124323676 2024-11-09T03:52:03,685 INFO [RS:2;6ee74a15f3e3:45149 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/WALs/6ee74a15f3e3,45149,1731124322967/6ee74a15f3e3%2C45149%2C1731124322967.1731124323673 2024-11-09T03:52:03,685 INFO [RS:0;6ee74a15f3e3:44037 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/WALs/6ee74a15f3e3,44037,1731124322884/6ee74a15f3e3%2C44037%2C1731124322884.1731124323672 2024-11-09T03:52:03,687 DEBUG [RS:2;6ee74a15f3e3:45149 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37491:37491),(127.0.0.1/127.0.0.1:39529:39529),(127.0.0.1/127.0.0.1:36993:36993)] 2024-11-09T03:52:03,687 DEBUG [RS:0;6ee74a15f3e3:44037 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39529:39529),(127.0.0.1/127.0.0.1:37491:37491),(127.0.0.1/127.0.0.1:36993:36993)] 2024-11-09T03:52:03,687 INFO [RS:1;6ee74a15f3e3:33163 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/WALs/6ee74a15f3e3,33163,1731124322924/6ee74a15f3e3%2C33163%2C1731124322924.1731124323676 2024-11-09T03:52:03,689 DEBUG [RS:1;6ee74a15f3e3:33163 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39529:39529),(127.0.0.1/127.0.0.1:37491:37491),(127.0.0.1/127.0.0.1:36993:36993)] 2024-11-09T03:52:03,811 DEBUG [6ee74a15f3e3:37273 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-09T03:52:03,812 DEBUG [6ee74a15f3e3:37273 {}] balancer.BalancerClusterState(204): Hosts are {6ee74a15f3e3=0} racks are {/default-rack=0} 2024-11-09T03:52:03,815 DEBUG [6ee74a15f3e3:37273 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T03:52:03,815 DEBUG [6ee74a15f3e3:37273 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T03:52:03,815 DEBUG [6ee74a15f3e3:37273 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-09T03:52:03,816 DEBUG [6ee74a15f3e3:37273 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T03:52:03,816 DEBUG [6ee74a15f3e3:37273 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T03:52:03,816 DEBUG [6ee74a15f3e3:37273 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-09T03:52:03,816 INFO [6ee74a15f3e3:37273 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T03:52:03,816 INFO [6ee74a15f3e3:37273 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T03:52:03,816 INFO [6ee74a15f3e3:37273 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-09T03:52:03,816 DEBUG [6ee74a15f3e3:37273 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T03:52:03,817 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6ee74a15f3e3,33163,1731124322924 2024-11-09T03:52:03,819 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6ee74a15f3e3,33163,1731124322924, state=OPENING 2024-11-09T03:52:03,889 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-09T03:52:03,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33163-0x1011db8dd280002, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45149-0x1011db8dd280003, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:03,904 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T03:52:03,904 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T03:52:03,904 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T03:52:03,904 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T03:52:03,904 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, 
hasLock=false; OpenRegionProcedure 1588230740, server=6ee74a15f3e3,33163,1731124322924}] 2024-11-09T03:52:03,905 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T03:52:04,061 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-09T03:52:04,066 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59197, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-09T03:52:04,074 INFO [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-09T03:52:04,075 INFO [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T03:52:04,078 INFO [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6ee74a15f3e3%2C33163%2C1731124322924.meta, suffix=.meta, logDir=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/WALs/6ee74a15f3e3,33163,1731124322924, archiveDir=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/oldWALs, maxLogs=32 2024-11-09T03:52:04,079 INFO [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6ee74a15f3e3%2C33163%2C1731124322924.meta.1731124324079.meta 2024-11-09T03:52:04,088 INFO [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/WALs/6ee74a15f3e3,33163,1731124322924/6ee74a15f3e3%2C33163%2C1731124322924.meta.1731124324079.meta 2024-11-09T03:52:04,092 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39529:39529),(127.0.0.1/127.0.0.1:36993:36993),(127.0.0.1/127.0.0.1:37491:37491)] 2024-11-09T03:52:04,096 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-09T03:52:04,096 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-09T03:52:04,096 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-09T03:52:04,096 INFO [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-09T03:52:04,096 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-09T03:52:04,096 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T03:52:04,097 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-09T03:52:04,097 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-09T03:52:04,098 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T03:52:04,099 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T03:52:04,100 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:52:04,100 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T03:52:04,100 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T03:52:04,101 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T03:52:04,101 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:52:04,102 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T03:52:04,102 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T03:52:04,103 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T03:52:04,103 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:52:04,103 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T03:52:04,104 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T03:52:04,105 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T03:52:04,105 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:52:04,106 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-09T03:52:04,106 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T03:52:04,107 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740 2024-11-09T03:52:04,108 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740 2024-11-09T03:52:04,110 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T03:52:04,110 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T03:52:04,110 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T03:52:04,112 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T03:52:04,113 INFO [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67956005, jitterRate=0.012623384594917297}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T03:52:04,113 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-09T03:52:04,114 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731124324097Writing region info on filesystem at 1731124324097Initializing all the Stores at 1731124324098 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731124324098Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731124324098Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731124324098Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731124324098Cleaning up temporary data from old regions at 1731124324110 (+12 ms)Running coprocessor post-open hooks at 1731124324113 (+3 ms)Region opened successfully at 1731124324114 (+1 ms) 2024-11-09T03:52:04,115 INFO [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731124324060 2024-11-09T03:52:04,118 DEBUG [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-09T03:52:04,118 INFO [RS_OPEN_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-09T03:52:04,119 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6ee74a15f3e3,33163,1731124322924 2024-11-09T03:52:04,120 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6ee74a15f3e3,33163,1731124322924, state=OPEN 2024-11-09T03:52:04,166 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-09T03:52:04,167 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-09T03:52:04,169 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-09T03:52:04,169 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-09T03:52:04,170 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-09T03:52:04,170 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-09T03:52:04,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T03:52:04,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33163-0x1011db8dd280002, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T03:52:04,195 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T03:52:04,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45149-0x1011db8dd280003, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T03:52:04,195 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6ee74a15f3e3,33163,1731124322924 2024-11-09T03:52:04,196 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T03:52:04,196 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T03:52:04,196 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T03:52:04,196 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T03:52:04,201 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-09T03:52:04,201 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6ee74a15f3e3,33163,1731124322924 in 292 msec 2024-11-09T03:52:04,206 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-09T03:52:04,206 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 794 msec 2024-11-09T03:52:04,207 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T03:52:04,207 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-09T03:52:04,209 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T03:52:04,210 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6ee74a15f3e3,33163,1731124322924, seqNum=-1] 2024-11-09T03:52:04,210 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T03:52:04,212 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46557, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T03:52:04,221 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 871 msec 2024-11-09T03:52:04,221 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731124324221, completionTime=-1 2024-11-09T03:52:04,221 INFO 
[master/6ee74a15f3e3:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-09T03:52:04,221 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-09T03:52:04,223 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-09T03:52:04,223 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731124384223 2024-11-09T03:52:04,223 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731124444223 2024-11-09T03:52:04,223 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-09T03:52:04,224 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-09T03:52:04,224 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,37273,1731124322704-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:04,224 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,37273,1731124322704-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:04,224 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,37273,1731124322704-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:04,224 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6ee74a15f3e3:37273, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:04,224 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:04,225 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:04,228 DEBUG [master/6ee74a15f3e3:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-09T03:52:04,230 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.191sec 2024-11-09T03:52:04,230 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-09T03:52:04,230 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-09T03:52:04,231 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 
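The entries above show the active master finishing initialization only after all three region servers have reported in ("count=3; waited=0ms, expected min=3 server(s), max=3 server(s)") and its periodic chores being scheduled. A minimal sketch of the test-side setup that would produce such a three-region-server minicluster follows; it assumes the HBaseTestingUtil no-arg constructor and the startMiniCluster(int) overload, and the class and field names are illustrative rather than taken from this run.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.BeforeClass;

    public class MiniClusterSetupSketch {
      // Assumed handle to the in-process cluster; reused by the later sketches in this log.
      protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUpCluster() throws Exception {
        // Starts mini DFS, ZooKeeper, one master and three region servers,
        // matching the "expected min=3 server(s), max=3 server(s)" wait above.
        UTIL.startMiniCluster(3);
      }
    }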
2024-11-09T03:52:04,231 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-09T03:52:04,231 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-09T03:52:04,231 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,37273,1731124322704-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T03:52:04,231 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,37273,1731124322704-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-09T03:52:04,234 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-09T03:52:04,234 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-09T03:52:04,234 INFO [master/6ee74a15f3e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6ee74a15f3e3,37273,1731124322704-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T03:52:04,299 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17460c9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T03:52:04,299 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6ee74a15f3e3,37273,-1 for getting cluster id 2024-11-09T03:52:04,300 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-09T03:52:04,302 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c0ff621e-2ce4-4d13-927d-4891fe4d95b1' 2024-11-09T03:52:04,303 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-09T03:52:04,303 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c0ff621e-2ce4-4d13-927d-4891fe4d95b1" 2024-11-09T03:52:04,304 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a30e10d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T03:52:04,304 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6ee74a15f3e3,37273,-1] 2024-11-09T03:52:04,304 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-09T03:52:04,305 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T03:52:04,307 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52578, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-09T03:52:04,309 
DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b807966, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T03:52:04,309 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T03:52:04,311 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6ee74a15f3e3,33163,1731124322924, seqNum=-1] 2024-11-09T03:52:04,312 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T03:52:04,314 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60900, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T03:52:04,316 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6ee74a15f3e3,37273,1731124322704 2024-11-09T03:52:04,317 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-09T03:52:04,318 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 6ee74a15f3e3,37273,1731124322704 2024-11-09T03:52:04,318 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5863dd6d 2024-11-09T03:52:04,318 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-09T03:52:04,320 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52586, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-09T03:52:04,321 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37273 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T03:52:04,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37273 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-09T03:52:04,324 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-09T03:52:04,324 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:52:04,324 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37273 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 
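At this point the master has accepted a client request to create 'TestHBaseWalOnEC' with a single column family 'cf' and stored it as CreateTableProcedure pid=4. One plausible way to issue an equivalent request through the standard Admin API is sketched below; this is not the test's own code, and the wrapper class and method names are assumptions.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    final class CreateTableSketch {
      // Issues a create-table request like the one logged by HMaster above:
      // table 'TestHBaseWalOnEC', one family 'cf', default attributes.
      static void createTestTable(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        try (Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build());
        }
      }
    }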
2024-11-09T03:52:04,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37273 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T03:52:04,326 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-09T03:52:04,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741837_1013 (size=392) 2024-11-09T03:52:04,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34661 is added to blk_1073741837_1013 (size=392) 2024-11-09T03:52:04,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741837_1013 (size=392) 2024-11-09T03:52:04,337 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1ce167c1e6d9e24939328189376167fb, NAME => 'TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba 2024-11-09T03:52:04,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741838_1014 (size=51) 2024-11-09T03:52:04,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741838_1014 (size=51) 2024-11-09T03:52:04,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34661 is added to blk_1073741838_1014 (size=51) 2024-11-09T03:52:04,346 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T03:52:04,347 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 1ce167c1e6d9e24939328189376167fb, disabling compactions & flushes 2024-11-09T03:52:04,347 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb. 2024-11-09T03:52:04,347 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb. 2024-11-09T03:52:04,347 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb. 
after waiting 0 ms 2024-11-09T03:52:04,347 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb. 2024-11-09T03:52:04,347 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb. 2024-11-09T03:52:04,347 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1ce167c1e6d9e24939328189376167fb: Waiting for close lock at 1731124324347Disabling compacts and flushes for region at 1731124324347Disabling writes for close at 1731124324347Writing region close event to WAL at 1731124324347Closed at 1731124324347 2024-11-09T03:52:04,349 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-09T03:52:04,349 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731124324349"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731124324349"}]},"ts":"1731124324349"} 2024-11-09T03:52:04,352 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-09T03:52:04,354 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-09T03:52:04,354 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731124324354"}]},"ts":"1731124324354"} 2024-11-09T03:52:04,357 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-09T03:52:04,357 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {6ee74a15f3e3=0} racks are {/default-rack=0} 2024-11-09T03:52:04,358 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T03:52:04,358 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T03:52:04,358 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-09T03:52:04,358 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T03:52:04,358 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T03:52:04,358 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-09T03:52:04,358 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T03:52:04,358 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T03:52:04,358 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-09T03:52:04,358 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T03:52:04,359 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, 
region=1ce167c1e6d9e24939328189376167fb, ASSIGN}] 2024-11-09T03:52:04,361 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=1ce167c1e6d9e24939328189376167fb, ASSIGN 2024-11-09T03:52:04,362 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=1ce167c1e6d9e24939328189376167fb, ASSIGN; state=OFFLINE, location=6ee74a15f3e3,45149,1731124322967; forceNewPlan=false, retain=false 2024-11-09T03:52:04,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37273 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T03:52:04,513 INFO [6ee74a15f3e3:37273 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-09T03:52:04,514 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1ce167c1e6d9e24939328189376167fb, regionState=OPENING, regionLocation=6ee74a15f3e3,45149,1731124322967 2024-11-09T03:52:04,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=1ce167c1e6d9e24939328189376167fb, ASSIGN because future has completed 2024-11-09T03:52:04,521 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1ce167c1e6d9e24939328189376167fb, server=6ee74a15f3e3,45149,1731124322967}] 2024-11-09T03:52:04,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37273 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T03:52:04,676 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-09T03:52:04,679 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51743, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-09T03:52:04,686 INFO [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb. 
2024-11-09T03:52:04,687 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 1ce167c1e6d9e24939328189376167fb, NAME => 'TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb.', STARTKEY => '', ENDKEY => ''} 2024-11-09T03:52:04,687 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 1ce167c1e6d9e24939328189376167fb 2024-11-09T03:52:04,687 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T03:52:04,687 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 1ce167c1e6d9e24939328189376167fb 2024-11-09T03:52:04,688 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 1ce167c1e6d9e24939328189376167fb 2024-11-09T03:52:04,690 INFO [StoreOpener-1ce167c1e6d9e24939328189376167fb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1ce167c1e6d9e24939328189376167fb 2024-11-09T03:52:04,692 INFO [StoreOpener-1ce167c1e6d9e24939328189376167fb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1ce167c1e6d9e24939328189376167fb columnFamilyName cf 2024-11-09T03:52:04,692 DEBUG [StoreOpener-1ce167c1e6d9e24939328189376167fb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T03:52:04,693 INFO [StoreOpener-1ce167c1e6d9e24939328189376167fb-1 {}] regionserver.HStore(327): Store=1ce167c1e6d9e24939328189376167fb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T03:52:04,693 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 1ce167c1e6d9e24939328189376167fb 2024-11-09T03:52:04,694 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/default/TestHBaseWalOnEC/1ce167c1e6d9e24939328189376167fb 2024-11-09T03:52:04,694 DEBUG 
[RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/default/TestHBaseWalOnEC/1ce167c1e6d9e24939328189376167fb 2024-11-09T03:52:04,695 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 1ce167c1e6d9e24939328189376167fb 2024-11-09T03:52:04,695 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 1ce167c1e6d9e24939328189376167fb 2024-11-09T03:52:04,697 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 1ce167c1e6d9e24939328189376167fb 2024-11-09T03:52:04,699 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/default/TestHBaseWalOnEC/1ce167c1e6d9e24939328189376167fb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T03:52:04,700 INFO [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 1ce167c1e6d9e24939328189376167fb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58933154, jitterRate=-0.12182757258415222}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-09T03:52:04,700 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1ce167c1e6d9e24939328189376167fb 2024-11-09T03:52:04,701 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 1ce167c1e6d9e24939328189376167fb: Running coprocessor pre-open hook at 1731124324688Writing region info on filesystem at 1731124324688Initializing all the Stores at 1731124324690 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731124324690Cleaning up temporary data from old regions at 1731124324695 (+5 ms)Running coprocessor post-open hooks at 1731124324700 (+5 ms)Region opened successfully at 1731124324701 (+1 ms) 2024-11-09T03:52:04,702 INFO [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb., pid=6, masterSystemTime=1731124324676 2024-11-09T03:52:04,706 DEBUG [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb. 2024-11-09T03:52:04,706 INFO [RS_OPEN_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb. 
2024-11-09T03:52:04,707 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1ce167c1e6d9e24939328189376167fb, regionState=OPEN, openSeqNum=2, regionLocation=6ee74a15f3e3,45149,1731124322967 2024-11-09T03:52:04,711 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1ce167c1e6d9e24939328189376167fb, server=6ee74a15f3e3,45149,1731124322967 because future has completed 2024-11-09T03:52:04,718 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-09T03:52:04,719 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 1ce167c1e6d9e24939328189376167fb, server=6ee74a15f3e3,45149,1731124322967 in 192 msec 2024-11-09T03:52:04,723 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-09T03:52:04,723 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=1ce167c1e6d9e24939328189376167fb, ASSIGN in 359 msec 2024-11-09T03:52:04,725 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-09T03:52:04,726 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731124324726"}]},"ts":"1731124324726"} 2024-11-09T03:52:04,729 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-09T03:52:04,731 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-09T03:52:04,735 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 411 msec 2024-11-09T03:52:04,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37273 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T03:52:04,953 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-09T03:52:04,953 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-09T03:52:04,953 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T03:52:04,957 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-09T03:52:04,958 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T03:52:04,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
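The CreateTableProcedure chain (pid=4/5/6) has now finished, and the testing utility reports all regions of TestHBaseWalOnEC assigned within its 60000 ms wait. The next entries locate the region for row 'row' and then flush a ~32 B cell at row/cf:cq, so the test has evidently written a single small cell in between. A hedged sketch of that wait-then-put step, reusing the assumed UTIL-style handle from the setup sketch; the cell value is a placeholder, not taken from the log.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class PutAfterAssignSketch {
      static void writeSingleCell(HBaseTestingUtil util) throws Exception {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        // Block until the new table's single region is assigned (the 60000 ms wait logged above).
        util.waitUntilAllRegionsAssigned(tn);
        try (Table table = util.getConnection().getTable(tn)) {
          // One small cell at row/cf:cq -- chosen only to be consistent with the
          // ~32 B memstore that gets flushed in the following entries.
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
        }
      }
    }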
2024-11-09T03:52:04,963 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb., hostname=6ee74a15f3e3,45149,1731124322967, seqNum=2] 2024-11-09T03:52:04,963 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T03:52:04,966 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56692, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T03:52:04,970 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37273 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-09T03:52:04,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37273 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-09T03:52:04,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37273 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T03:52:04,974 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-09T03:52:04,976 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-09T03:52:04,976 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-09T03:52:05,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37273 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T03:52:05,131 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45149 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-09T03:52:05,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb. 
2024-11-09T03:52:05,133 INFO [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 1ce167c1e6d9e24939328189376167fb 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-09T03:52:05,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/default/TestHBaseWalOnEC/1ce167c1e6d9e24939328189376167fb/.tmp/cf/bbec55615ee64f94b2148f3f391a52bb is 36, key is row/cf:cq/1731124324967/Put/seqid=0 2024-11-09T03:52:05,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741839_1015 (size=4787) 2024-11-09T03:52:05,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741839_1015 (size=4787) 2024-11-09T03:52:05,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34661 is added to blk_1073741839_1015 (size=4787) 2024-11-09T03:52:05,163 INFO [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/default/TestHBaseWalOnEC/1ce167c1e6d9e24939328189376167fb/.tmp/cf/bbec55615ee64f94b2148f3f391a52bb 2024-11-09T03:52:05,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/default/TestHBaseWalOnEC/1ce167c1e6d9e24939328189376167fb/.tmp/cf/bbec55615ee64f94b2148f3f391a52bb as hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/default/TestHBaseWalOnEC/1ce167c1e6d9e24939328189376167fb/cf/bbec55615ee64f94b2148f3f391a52bb 2024-11-09T03:52:05,181 INFO [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/default/TestHBaseWalOnEC/1ce167c1e6d9e24939328189376167fb/cf/bbec55615ee64f94b2148f3f391a52bb, entries=1, sequenceid=5, filesize=4.7 K 2024-11-09T03:52:05,183 INFO [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 1ce167c1e6d9e24939328189376167fb in 51ms, sequenceid=5, compaction requested=false 2024-11-09T03:52:05,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 1ce167c1e6d9e24939328189376167fb: 2024-11-09T03:52:05,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb. 
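The region-level flush above was driven by a master-side FlushTableProcedure (pid=7) with a FlushRegionProcedure subtask (pid=8), started in response to the client request "Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC". A corresponding Admin call, sketched under the same assumptions as the earlier snippets:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    final class FlushSketch {
      // Asks the master to flush every region of the table, which produces the
      // memstore flush and HFile commit recorded in the entries above.
      static void flushTestTable(Connection conn) throws Exception {
        try (Admin admin = conn.getAdmin()) {
          admin.flush(TableName.valueOf("TestHBaseWalOnEC"));
        }
      }
    }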
2024-11-09T03:52:05,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6ee74a15f3e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-09T03:52:05,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37273 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-09T03:52:05,189 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-09T03:52:05,189 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 209 msec 2024-11-09T03:52:05,193 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 220 msec 2024-11-09T03:52:05,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37273 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T03:52:05,294 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-09T03:52:05,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-09T03:52:05,301 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-09T03:52:05,301 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T03:52:05,301 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T03:52:05,301 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T03:52:05,301 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-09T03:52:05,302 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-09T03:52:05,302 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1775650297, stopped=false 2024-11-09T03:52:05,302 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6ee74a15f3e3,37273,1731124322704 2024-11-09T03:52:05,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T03:52:05,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33163-0x1011db8dd280002, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T03:52:05,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T03:52:05,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45149-0x1011db8dd280003, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T03:52:05,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:05,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33163-0x1011db8dd280002, 
quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:05,407 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T03:52:05,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45149-0x1011db8dd280003, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:05,407 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-09T03:52:05,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:05,408 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) 
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T03:52:05,408 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33163-0x1011db8dd280002, quorum=127.0.0.1:54975, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T03:52:05,408 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T03:52:05,408 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T03:52:05,409 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T03:52:05,409 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6ee74a15f3e3,44037,1731124322884' ***** 2024-11-09T03:52:05,409 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T03:52:05,409 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6ee74a15f3e3,33163,1731124322924' ***** 2024-11-09T03:52:05,409 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T03:52:05,410 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6ee74a15f3e3,45149,1731124322967' ***** 2024-11-09T03:52:05,409 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45149-0x1011db8dd280003, quorum=127.0.0.1:54975, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T03:52:05,410 INFO [RS:0;6ee74a15f3e3:44037 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T03:52:05,410 INFO [RS:0;6ee74a15f3e3:44037 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T03:52:05,410 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T03:52:05,410 INFO [RS:0;6ee74a15f3e3:44037 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-09T03:52:05,411 INFO [RS:0;6ee74a15f3e3:44037 {}] regionserver.HRegionServer(959): stopping server 6ee74a15f3e3,44037,1731124322884 2024-11-09T03:52:05,410 INFO [RS:1;6ee74a15f3e3:33163 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T03:52:05,410 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T03:52:05,411 INFO [RS:0;6ee74a15f3e3:44037 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T03:52:05,411 INFO [RS:1;6ee74a15f3e3:33163 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T03:52:05,411 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T03:52:05,411 INFO [RS:0;6ee74a15f3e3:44037 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6ee74a15f3e3:44037. 2024-11-09T03:52:05,411 INFO [RS:1;6ee74a15f3e3:33163 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T03:52:05,411 INFO [RS:2;6ee74a15f3e3:45149 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T03:52:05,411 INFO [RS:1;6ee74a15f3e3:33163 {}] regionserver.HRegionServer(959): stopping server 6ee74a15f3e3,33163,1731124322924 2024-11-09T03:52:05,411 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T03:52:05,411 INFO [RS:1;6ee74a15f3e3:33163 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T03:52:05,411 DEBUG [RS:0;6ee74a15f3e3:44037 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T03:52:05,412 INFO [RS:1;6ee74a15f3e3:33163 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;6ee74a15f3e3:33163. 2024-11-09T03:52:05,412 DEBUG [RS:0;6ee74a15f3e3:44037 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T03:52:05,412 INFO [RS:2;6ee74a15f3e3:45149 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T03:52:05,412 INFO [RS:2;6ee74a15f3e3:45149 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-09T03:52:05,412 INFO [RS:0;6ee74a15f3e3:44037 {}] regionserver.HRegionServer(976): stopping server 6ee74a15f3e3,44037,1731124322884; all regions closed. 2024-11-09T03:52:05,412 DEBUG [RS:1;6ee74a15f3e3:33163 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T03:52:05,412 INFO [RS:2;6ee74a15f3e3:45149 {}] regionserver.HRegionServer(3091): Received CLOSE for 1ce167c1e6d9e24939328189376167fb 2024-11-09T03:52:05,412 DEBUG [RS:1;6ee74a15f3e3:33163 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T03:52:05,412 INFO [RS:1;6ee74a15f3e3:33163 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T03:52:05,412 INFO [RS:1;6ee74a15f3e3:33163 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T03:52:05,412 INFO [RS:1;6ee74a15f3e3:33163 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T03:52:05,412 INFO [RS:2;6ee74a15f3e3:45149 {}] regionserver.HRegionServer(959): stopping server 6ee74a15f3e3,45149,1731124322967 2024-11-09T03:52:05,412 INFO [RS:2;6ee74a15f3e3:45149 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T03:52:05,412 INFO [RS:1;6ee74a15f3e3:33163 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-09T03:52:05,412 INFO [RS:2;6ee74a15f3e3:45149 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;6ee74a15f3e3:45149. 
2024-11-09T03:52:05,412 DEBUG [RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 1ce167c1e6d9e24939328189376167fb, disabling compactions & flushes 2024-11-09T03:52:05,412 DEBUG [RS:2;6ee74a15f3e3:45149 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T03:52:05,412 INFO [RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb. 2024-11-09T03:52:05,412 DEBUG [RS:2;6ee74a15f3e3:45149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T03:52:05,412 DEBUG [RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb. 2024-11-09T03:52:05,412 DEBUG [RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb. after waiting 0 ms 2024-11-09T03:52:05,413 INFO [RS:2;6ee74a15f3e3:45149 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-09T03:52:05,413 DEBUG [RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb. 
2024-11-09T03:52:05,413 DEBUG [RS:2;6ee74a15f3e3:45149 {}] regionserver.HRegionServer(1325): Online Regions={1ce167c1e6d9e24939328189376167fb=TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb.} 2024-11-09T03:52:05,413 DEBUG [RS:2;6ee74a15f3e3:45149 {}] regionserver.HRegionServer(1351): Waiting on 1ce167c1e6d9e24939328189376167fb 2024-11-09T03:52:05,416 INFO [RS:1;6ee74a15f3e3:33163 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-09T03:52:05,416 DEBUG [RS:1;6ee74a15f3e3:33163 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-09T03:52:05,416 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,416 DEBUG [RS:1;6ee74a15f3e3:33163 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-09T03:52:05,416 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T03:52:05,416 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,416 INFO [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T03:52:05,417 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T03:52:05,417 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,417 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T03:52:05,417 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T03:52:05,417 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,417 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,417 INFO [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-09T03:52:05,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34661 is added to blk_1073741833_1009 (size=93) 2024-11-09T03:52:05,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741833_1009 (size=93) 2024-11-09T03:52:05,422 DEBUG [RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/default/TestHBaseWalOnEC/1ce167c1e6d9e24939328189376167fb/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-09T03:52:05,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741833_1009 (size=93) 2024-11-09T03:52:05,423 INFO [RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb. 
2024-11-09T03:52:05,423 DEBUG [RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 1ce167c1e6d9e24939328189376167fb: Waiting for close lock at 1731124325412Running coprocessor pre-close hooks at 1731124325412Disabling compacts and flushes for region at 1731124325412Disabling writes for close at 1731124325413 (+1 ms)Writing region close event to WAL at 1731124325417 (+4 ms)Running coprocessor post-close hooks at 1731124325422 (+5 ms)Closed at 1731124325423 (+1 ms) 2024-11-09T03:52:05,423 DEBUG [RS_CLOSE_REGION-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb. 2024-11-09T03:52:05,423 DEBUG [RS:0;6ee74a15f3e3:44037 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/oldWALs 2024-11-09T03:52:05,424 INFO [RS:0;6ee74a15f3e3:44037 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6ee74a15f3e3%2C44037%2C1731124322884:(num 1731124323672) 2024-11-09T03:52:05,424 DEBUG [RS:0;6ee74a15f3e3:44037 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T03:52:05,424 INFO [RS:0;6ee74a15f3e3:44037 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T03:52:05,424 INFO [RS:0;6ee74a15f3e3:44037 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T03:52:05,424 INFO [RS:0;6ee74a15f3e3:44037 {}] hbase.ChoreService(370): Chore service for: regionserver/6ee74a15f3e3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T03:52:05,424 INFO [RS:0;6ee74a15f3e3:44037 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T03:52:05,424 INFO [regionserver/6ee74a15f3e3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T03:52:05,424 INFO [RS:0;6ee74a15f3e3:44037 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T03:52:05,424 INFO [RS:0;6ee74a15f3e3:44037 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-09T03:52:05,424 INFO [RS:0;6ee74a15f3e3:44037 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T03:52:05,424 INFO [RS:0;6ee74a15f3e3:44037 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44037 2024-11-09T03:52:05,436 INFO [regionserver/6ee74a15f3e3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T03:52:05,437 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740/.tmp/info/ce25d11bf68b48e095efc2e8d688fb73 is 153, key is TestHBaseWalOnEC,,1731124324320.1ce167c1e6d9e24939328189376167fb./info:regioninfo/1731124324707/Put/seqid=0 2024-11-09T03:52:05,438 WARN [IPC Server handler 0 on default port 33683 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-09T03:52:05,438 WARN [IPC Server handler 0 on default port 33683 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-09T03:52:05,438 WARN [IPC Server handler 0 on default port 33683 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-09T03:52:05,439 INFO [regionserver/6ee74a15f3e3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T03:52:05,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741840_1016 (size=6637) 2024-11-09T03:52:05,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741840_1016 (size=6637) 2024-11-09T03:52:05,444 INFO [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740/.tmp/info/ce25d11bf68b48e095efc2e8d688fb73 2024-11-09T03:52:05,444 INFO [regionserver/6ee74a15f3e3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T03:52:05,469 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740/.tmp/ns/1f4f23573b514c34a8df6522b931d280 is 43, key is default/ns:d/1731124324212/Put/seqid=0 
2024-11-09T03:52:05,470 WARN [IPC Server handler 4 on default port 33683 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-09T03:52:05,470 WARN [IPC Server handler 4 on default port 33683 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-09T03:52:05,470 WARN [IPC Server handler 4 on default port 33683 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-09T03:52:05,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741841_1017 (size=5153) 2024-11-09T03:52:05,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741841_1017 (size=5153) 2024-11-09T03:52:05,476 INFO [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740/.tmp/ns/1f4f23573b514c34a8df6522b931d280 2024-11-09T03:52:05,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6ee74a15f3e3,44037,1731124322884 2024-11-09T03:52:05,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T03:52:05,492 INFO [RS:0;6ee74a15f3e3:44037 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T03:52:05,497 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740/.tmp/table/0dae7752edfc41959d1d6b16e4f25b19 is 52, key is TestHBaseWalOnEC/table:state/1731124324726/Put/seqid=0 2024-11-09T03:52:05,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741842_1018 (size=5249) 2024-11-09T03:52:05,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741842_1018 (size=5249) 2024-11-09T03:52:05,504 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34661 is added to blk_1073741842_1018 (size=5249) 2024-11-09T03:52:05,505 INFO [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740/.tmp/table/0dae7752edfc41959d1d6b16e4f25b19 2024-11-09T03:52:05,514 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740/.tmp/info/ce25d11bf68b48e095efc2e8d688fb73 as hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740/info/ce25d11bf68b48e095efc2e8d688fb73 2024-11-09T03:52:05,523 INFO [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740/info/ce25d11bf68b48e095efc2e8d688fb73, entries=10, sequenceid=11, filesize=6.5 K 2024-11-09T03:52:05,524 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740/.tmp/ns/1f4f23573b514c34a8df6522b931d280 as hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740/ns/1f4f23573b514c34a8df6522b931d280 2024-11-09T03:52:05,533 INFO [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740/ns/1f4f23573b514c34a8df6522b931d280, entries=2, sequenceid=11, filesize=5.0 K 2024-11-09T03:52:05,534 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740/.tmp/table/0dae7752edfc41959d1d6b16e4f25b19 as hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740/table/0dae7752edfc41959d1d6b16e4f25b19 2024-11-09T03:52:05,537 INFO [regionserver/6ee74a15f3e3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-09T03:52:05,537 INFO [regionserver/6ee74a15f3e3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-09T03:52:05,542 INFO [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740/table/0dae7752edfc41959d1d6b16e4f25b19, entries=2, sequenceid=11, filesize=5.1 K 2024-11-09T03:52:05,544 INFO [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 127ms, sequenceid=11, compaction requested=false 2024-11-09T03:52:05,550 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 
{event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-09T03:52:05,550 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-09T03:52:05,551 INFO [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T03:52:05,551 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731124325416Running coprocessor pre-close hooks at 1731124325416Disabling compacts and flushes for region at 1731124325416Disabling writes for close at 1731124325417 (+1 ms)Obtaining lock to block concurrent updates at 1731124325417Preparing flush snapshotting stores in 1588230740 at 1731124325417Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731124325417Flushing stores of hbase:meta,,1.1588230740 at 1731124325418 (+1 ms)Flushing 1588230740/info: creating writer at 1731124325418Flushing 1588230740/info: appending metadata at 1731124325436 (+18 ms)Flushing 1588230740/info: closing flushed file at 1731124325436Flushing 1588230740/ns: creating writer at 1731124325452 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731124325468 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731124325468Flushing 1588230740/table: creating writer at 1731124325482 (+14 ms)Flushing 1588230740/table: appending metadata at 1731124325496 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731124325496Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34994004: reopening flushed file at 1731124325513 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6dfecaa5: reopening flushed file at 1731124325523 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a0efb01: reopening flushed file at 1731124325533 (+10 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 127ms, sequenceid=11, compaction requested=false at 1731124325544 (+11 ms)Writing region close event to WAL at 1731124325545 (+1 ms)Running coprocessor post-close hooks at 1731124325550 (+5 ms)Closed at 1731124325551 (+1 ms) 2024-11-09T03:52:05,551 DEBUG [RS_CLOSE_META-regionserver/6ee74a15f3e3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-09T03:52:05,556 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6ee74a15f3e3,44037,1731124322884] 2024-11-09T03:52:05,584 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6ee74a15f3e3,44037,1731124322884 already deleted, retry=false 2024-11-09T03:52:05,584 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6ee74a15f3e3,44037,1731124322884 expired; onlineServers=2 2024-11-09T03:52:05,613 INFO [RS:2;6ee74a15f3e3:45149 {}] regionserver.HRegionServer(976): stopping server 6ee74a15f3e3,45149,1731124322967; all regions closed. 
2024-11-09T03:52:05,614 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,614 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,614 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,614 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,614 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,616 INFO [RS:1;6ee74a15f3e3:33163 {}] regionserver.HRegionServer(976): stopping server 6ee74a15f3e3,33163,1731124322924; all regions closed. 2024-11-09T03:52:05,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34661 is added to blk_1073741834_1010 (size=1298) 2024-11-09T03:52:05,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741834_1010 (size=1298) 2024-11-09T03:52:05,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741834_1010 (size=1298) 2024-11-09T03:52:05,617 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,618 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,618 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,618 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,618 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741836_1012 (size=2751) 2024-11-09T03:52:05,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34661 is added to blk_1073741836_1012 (size=2751) 2024-11-09T03:52:05,621 DEBUG [RS:2;6ee74a15f3e3:45149 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/oldWALs 2024-11-09T03:52:05,621 INFO [RS:2;6ee74a15f3e3:45149 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6ee74a15f3e3%2C45149%2C1731124322967:(num 1731124323673) 2024-11-09T03:52:05,621 DEBUG [RS:2;6ee74a15f3e3:45149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T03:52:05,621 INFO [RS:2;6ee74a15f3e3:45149 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T03:52:05,621 INFO [RS:2;6ee74a15f3e3:45149 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T03:52:05,622 INFO [RS:2;6ee74a15f3e3:45149 {}] hbase.ChoreService(370): Chore service for: regionserver/6ee74a15f3e3:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T03:52:05,622 INFO [RS:2;6ee74a15f3e3:45149 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T03:52:05,622 INFO [RS:2;6ee74a15f3e3:45149 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T03:52:05,622 INFO [regionserver/6ee74a15f3e3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T03:52:05,622 INFO [RS:2;6ee74a15f3e3:45149 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-09T03:52:05,622 INFO [RS:2;6ee74a15f3e3:45149 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T03:52:05,622 INFO [RS:2;6ee74a15f3e3:45149 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45149 2024-11-09T03:52:05,624 DEBUG [RS:1;6ee74a15f3e3:33163 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/oldWALs 2024-11-09T03:52:05,624 INFO [RS:1;6ee74a15f3e3:33163 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6ee74a15f3e3%2C33163%2C1731124322924.meta:.meta(num 1731124324079) 2024-11-09T03:52:05,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741836_1012 (size=2751) 2024-11-09T03:52:05,625 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,625 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,625 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,625 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,625 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34661 is added to blk_1073741835_1011 (size=93) 2024-11-09T03:52:05,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741835_1011 (size=93) 2024-11-09T03:52:05,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741835_1011 (size=93) 2024-11-09T03:52:05,628 INFO [regionserver/6ee74a15f3e3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-09T03:52:05,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T03:52:05,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45149-0x1011db8dd280003, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6ee74a15f3e3,45149,1731124322967 2024-11-09T03:52:05,628 INFO [regionserver/6ee74a15f3e3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-09T03:52:05,628 INFO [RS:2;6ee74a15f3e3:45149 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T03:52:05,629 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6ee74a15f3e3,45149,1731124322967] 2024-11-09T03:52:05,634 DEBUG [RS:1;6ee74a15f3e3:33163 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/oldWALs 2024-11-09T03:52:05,634 INFO [RS:1;6ee74a15f3e3:33163 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6ee74a15f3e3%2C33163%2C1731124322924:(num 1731124323676) 2024-11-09T03:52:05,634 DEBUG [RS:1;6ee74a15f3e3:33163 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T03:52:05,634 INFO [RS:1;6ee74a15f3e3:33163 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T03:52:05,634 INFO [RS:1;6ee74a15f3e3:33163 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T03:52:05,634 INFO [RS:1;6ee74a15f3e3:33163 {}] hbase.ChoreService(370): Chore service for: 
regionserver/6ee74a15f3e3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T03:52:05,635 INFO [RS:1;6ee74a15f3e3:33163 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T03:52:05,635 INFO [regionserver/6ee74a15f3e3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T03:52:05,635 INFO [RS:1;6ee74a15f3e3:33163 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33163 2024-11-09T03:52:05,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33163-0x1011db8dd280002, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6ee74a15f3e3,33163,1731124322924 2024-11-09T03:52:05,649 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6ee74a15f3e3,45149,1731124322967 already deleted, retry=false 2024-11-09T03:52:05,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T03:52:05,649 INFO [RS:1;6ee74a15f3e3:33163 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T03:52:05,649 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6ee74a15f3e3,45149,1731124322967 expired; onlineServers=1 2024-11-09T03:52:05,656 INFO [RS:0;6ee74a15f3e3:44037 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T03:52:05,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T03:52:05,656 INFO [RS:0;6ee74a15f3e3:44037 {}] regionserver.HRegionServer(1031): Exiting; stopping=6ee74a15f3e3,44037,1731124322884; zookeeper connection closed. 
2024-11-09T03:52:05,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44037-0x1011db8dd280001, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T03:52:05,657 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@54c1aa0d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@54c1aa0d 2024-11-09T03:52:05,660 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6ee74a15f3e3,33163,1731124322924] 2024-11-09T03:52:05,670 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6ee74a15f3e3,33163,1731124322924 already deleted, retry=false 2024-11-09T03:52:05,670 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6ee74a15f3e3,33163,1731124322924 expired; onlineServers=0 2024-11-09T03:52:05,670 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6ee74a15f3e3,37273,1731124322704' ***** 2024-11-09T03:52:05,670 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-09T03:52:05,670 INFO [M:0;6ee74a15f3e3:37273 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T03:52:05,670 INFO [M:0;6ee74a15f3e3:37273 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T03:52:05,671 DEBUG [M:0;6ee74a15f3e3:37273 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-09T03:52:05,671 DEBUG [M:0;6ee74a15f3e3:37273 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-09T03:52:05,671 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster-HFileCleaner.small.0-1731124323355 {}] cleaner.HFileCleaner(306): Exit Thread[master/6ee74a15f3e3:0:becomeActiveMaster-HFileCleaner.small.0-1731124323355,5,FailOnTimeoutGroup] 2024-11-09T03:52:05,671 DEBUG [master/6ee74a15f3e3:0:becomeActiveMaster-HFileCleaner.large.0-1731124323355 {}] cleaner.HFileCleaner(306): Exit Thread[master/6ee74a15f3e3:0:becomeActiveMaster-HFileCleaner.large.0-1731124323355,5,FailOnTimeoutGroup] 2024-11-09T03:52:05,671 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-09T03:52:05,671 INFO [M:0;6ee74a15f3e3:37273 {}] hbase.ChoreService(370): Chore service for: master/6ee74a15f3e3:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-09T03:52:05,671 INFO [M:0;6ee74a15f3e3:37273 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T03:52:05,671 DEBUG [M:0;6ee74a15f3e3:37273 {}] master.HMaster(1795): Stopping service threads 2024-11-09T03:52:05,671 INFO [M:0;6ee74a15f3e3:37273 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-09T03:52:05,671 INFO [M:0;6ee74a15f3e3:37273 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T03:52:05,672 INFO [M:0;6ee74a15f3e3:37273 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-09T03:52:05,672 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-09T03:52:05,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-09T03:52:05,681 DEBUG [M:0;6ee74a15f3e3:37273 {}] zookeeper.ZKUtil(347): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-09T03:52:05,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T03:52:05,681 WARN [M:0;6ee74a15f3e3:37273 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-09T03:52:05,682 INFO [M:0;6ee74a15f3e3:37273 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/.lastflushedseqids 2024-11-09T03:52:05,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34661 is added to blk_1073741843_1019 (size=127) 2024-11-09T03:52:05,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741843_1019 (size=127) 2024-11-09T03:52:05,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741843_1019 (size=127) 2024-11-09T03:52:05,690 INFO [M:0;6ee74a15f3e3:37273 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-09T03:52:05,690 INFO [M:0;6ee74a15f3e3:37273 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-09T03:52:05,690 DEBUG [M:0;6ee74a15f3e3:37273 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T03:52:05,690 INFO [M:0;6ee74a15f3e3:37273 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T03:52:05,690 DEBUG [M:0;6ee74a15f3e3:37273 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T03:52:05,690 DEBUG [M:0;6ee74a15f3e3:37273 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-09T03:52:05,690 DEBUG [M:0;6ee74a15f3e3:37273 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-09T03:52:05,690 INFO [M:0;6ee74a15f3e3:37273 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-11-09T03:52:05,707 DEBUG [M:0;6ee74a15f3e3:37273 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dfb18315716b4999a31c2df3c1c9fc33 is 82, key is hbase:meta,,1/info:regioninfo/1731124324119/Put/seqid=0 2024-11-09T03:52:05,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741844_1020 (size=5672) 2024-11-09T03:52:05,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741844_1020 (size=5672) 2024-11-09T03:52:05,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34661 is added to blk_1073741844_1020 (size=5672) 2024-11-09T03:52:05,715 INFO [M:0;6ee74a15f3e3:37273 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dfb18315716b4999a31c2df3c1c9fc33 2024-11-09T03:52:05,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45149-0x1011db8dd280003, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T03:52:05,739 INFO [RS:2;6ee74a15f3e3:45149 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T03:52:05,739 INFO [RS:2;6ee74a15f3e3:45149 {}] regionserver.HRegionServer(1031): Exiting; stopping=6ee74a15f3e3,45149,1731124322967; zookeeper connection closed. 
2024-11-09T03:52:05,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45149-0x1011db8dd280003, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T03:52:05,739 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4fd50962 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4fd50962 2024-11-09T03:52:05,739 DEBUG [M:0;6ee74a15f3e3:37273 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2886ad99287b41a79a57eb67a168bdeb is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731124324734/Put/seqid=0 2024-11-09T03:52:05,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741845_1021 (size=6438) 2024-11-09T03:52:05,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741845_1021 (size=6438) 2024-11-09T03:52:05,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34661 is added to blk_1073741845_1021 (size=6438) 2024-11-09T03:52:05,748 INFO [M:0;6ee74a15f3e3:37273 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.13 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2886ad99287b41a79a57eb67a168bdeb 2024-11-09T03:52:05,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33163-0x1011db8dd280002, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T03:52:05,760 INFO [RS:1;6ee74a15f3e3:33163 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T03:52:05,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33163-0x1011db8dd280002, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T03:52:05,760 INFO [RS:1;6ee74a15f3e3:33163 {}] regionserver.HRegionServer(1031): Exiting; stopping=6ee74a15f3e3,33163,1731124322924; zookeeper connection closed. 
2024-11-09T03:52:05,760 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3aa01277 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3aa01277 2024-11-09T03:52:05,761 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-09T03:52:05,771 DEBUG [M:0;6ee74a15f3e3:37273 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/93a1f651d4a44ab48e448aa0c9b7a80d is 69, key is 6ee74a15f3e3,33163,1731124322924/rs:state/1731124323463/Put/seqid=0 2024-11-09T03:52:05,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34661 is added to blk_1073741846_1022 (size=5294) 2024-11-09T03:52:05,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741846_1022 (size=5294) 2024-11-09T03:52:05,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741846_1022 (size=5294) 2024-11-09T03:52:05,781 INFO [M:0;6ee74a15f3e3:37273 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/93a1f651d4a44ab48e448aa0c9b7a80d 2024-11-09T03:52:05,788 DEBUG [M:0;6ee74a15f3e3:37273 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dfb18315716b4999a31c2df3c1c9fc33 as hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/dfb18315716b4999a31c2df3c1c9fc33 2024-11-09T03:52:05,796 INFO [M:0;6ee74a15f3e3:37273 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/dfb18315716b4999a31c2df3c1c9fc33, entries=8, sequenceid=72, filesize=5.5 K 2024-11-09T03:52:05,797 DEBUG [M:0;6ee74a15f3e3:37273 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2886ad99287b41a79a57eb67a168bdeb as hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2886ad99287b41a79a57eb67a168bdeb 2024-11-09T03:52:05,804 INFO [M:0;6ee74a15f3e3:37273 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2886ad99287b41a79a57eb67a168bdeb, entries=8, sequenceid=72, filesize=6.3 K 2024-11-09T03:52:05,805 DEBUG [M:0;6ee74a15f3e3:37273 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/93a1f651d4a44ab48e448aa0c9b7a80d as 
hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/93a1f651d4a44ab48e448aa0c9b7a80d 2024-11-09T03:52:05,812 INFO [M:0;6ee74a15f3e3:37273 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33683/user/jenkins/test-data/b1728f33-6c0f-2b57-5d65-974f0fc80aba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/93a1f651d4a44ab48e448aa0c9b7a80d, entries=3, sequenceid=72, filesize=5.2 K 2024-11-09T03:52:05,814 INFO [M:0;6ee74a15f3e3:37273 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=72, compaction requested=false 2024-11-09T03:52:05,816 INFO [M:0;6ee74a15f3e3:37273 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T03:52:05,816 DEBUG [M:0;6ee74a15f3e3:37273 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731124325690Disabling compacts and flushes for region at 1731124325690Disabling writes for close at 1731124325690Obtaining lock to block concurrent updates at 1731124325690Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731124325690Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27459, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1731124325691 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731124325692 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731124325692Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731124325706 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731124325706Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731124325723 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731124325738 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731124325739 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731124325755 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731124325770 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731124325770Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@52b95ea2: reopening flushed file at 1731124325787 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@ac0d73e: reopening flushed file at 1731124325796 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a3b4550: reopening flushed file at 1731124325804 (+8 ms)Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=72, compaction requested=false at 1731124325814 (+10 ms)Writing region close event to WAL at 1731124325816 (+2 ms)Closed at 1731124325816 2024-11-09T03:52:05,816 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,816 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,816 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,817 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,817 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T03:52:05,819 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34525 is added to blk_1073741830_1006 (size=32662)
2024-11-09T03:52:05,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34661 is added to blk_1073741830_1006 (size=32662)
2024-11-09T03:52:05,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37363 is added to blk_1073741830_1006 (size=32662)
2024-11-09T03:52:05,821 INFO [M:0;6ee74a15f3e3:37273 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-09T03:52:05,821 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-09T03:52:05,821 INFO [M:0;6ee74a15f3e3:37273 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37273
2024-11-09T03:52:05,821 INFO [M:0;6ee74a15f3e3:37273 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-09T03:52:05,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-09T03:52:05,928 INFO [M:0;6ee74a15f3e3:37273 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-09T03:52:05,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37273-0x1011db8dd280000, quorum=127.0.0.1:54975, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-09T03:52:05,932 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@50064390{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-09T03:52:05,933 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@aa26abb{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-09T03:52:05,933 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-09T03:52:05,933 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18afd393{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-09T03:52:05,934 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@457c3b58{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/hadoop.log.dir/,STOPPED}
2024-11-09T03:52:05,936 WARN [BP-926588790-172.17.0.2-1731124319905 heartbeating to localhost/127.0.0.1:33683 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-09T03:52:05,936 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-09T03:52:05,936 WARN [BP-926588790-172.17.0.2-1731124319905 heartbeating to localhost/127.0.0.1:33683 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-926588790-172.17.0.2-1731124319905 (Datanode Uuid cbb500c1-020a-4531-a43b-0d0eb62ad732) service to localhost/127.0.0.1:33683
2024-11-09T03:52:05,936 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-09T03:52:05,938 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/cluster_df055b96-c134-1ead-2867-9d7e46247600/data/data5/current/BP-926588790-172.17.0.2-1731124319905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-09T03:52:05,938 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/cluster_df055b96-c134-1ead-2867-9d7e46247600/data/data6/current/BP-926588790-172.17.0.2-1731124319905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-09T03:52:05,938 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-09T03:52:05,942 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3ce3bd23{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-09T03:52:05,942 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@92b1c57{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-09T03:52:05,942 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-09T03:52:05,943 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3700b027{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-09T03:52:05,943 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@119b8466{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/hadoop.log.dir/,STOPPED}
2024-11-09T03:52:05,944 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-09T03:52:05,944 WARN [BP-926588790-172.17.0.2-1731124319905 heartbeating to localhost/127.0.0.1:33683 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-09T03:52:05,944 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-09T03:52:05,944 WARN [BP-926588790-172.17.0.2-1731124319905 heartbeating to localhost/127.0.0.1:33683 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-926588790-172.17.0.2-1731124319905 (Datanode Uuid bdaf8bbd-744d-42fc-8a57-0c0603eacbf4) service to localhost/127.0.0.1:33683
2024-11-09T03:52:05,945 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/cluster_df055b96-c134-1ead-2867-9d7e46247600/data/data3/current/BP-926588790-172.17.0.2-1731124319905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-09T03:52:05,945 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/cluster_df055b96-c134-1ead-2867-9d7e46247600/data/data4/current/BP-926588790-172.17.0.2-1731124319905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-09T03:52:05,945 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-09T03:52:05,947 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c99f559{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-09T03:52:05,948 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3beb2b8e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-09T03:52:05,948 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-09T03:52:05,948 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26256332{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-09T03:52:05,948 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a5de444{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/hadoop.log.dir/,STOPPED}
2024-11-09T03:52:05,949 WARN [BP-926588790-172.17.0.2-1731124319905 heartbeating to localhost/127.0.0.1:33683 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-09T03:52:05,949 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-09T03:52:05,949 WARN [BP-926588790-172.17.0.2-1731124319905 heartbeating to localhost/127.0.0.1:33683 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-926588790-172.17.0.2-1731124319905 (Datanode Uuid 4740fc86-1eb6-4b71-be8d-5081bf4a3117) service to localhost/127.0.0.1:33683
2024-11-09T03:52:05,949 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-09T03:52:05,950 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/cluster_df055b96-c134-1ead-2867-9d7e46247600/data/data1/current/BP-926588790-172.17.0.2-1731124319905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-09T03:52:05,950 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/cluster_df055b96-c134-1ead-2867-9d7e46247600/data/data2/current/BP-926588790-172.17.0.2-1731124319905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-09T03:52:05,951 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-09T03:52:05,956 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6a71642{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-09T03:52:05,956 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6daae1d7{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-09T03:52:05,956 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-09T03:52:05,957 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70402c4c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-09T03:52:05,957 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61c928f1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/c3753758-14f4-3440-3672-916f7ae3ac0f/hadoop.log.dir/,STOPPED}
2024-11-09T03:52:05,963 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-09T03:52:05,987 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-09T03:52:05,994 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=155 (was 94) - Thread LEAK? -, OpenFileDescriptor=518 (was 445) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=135 (was 138), ProcessCount=11 (was 11), AvailableMemoryMB=6063 (was 6206)