2024-11-10 13:52:25,615 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-10 13:52:25,627 main DEBUG Took 0.010234 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-10 13:52:25,628 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-10 13:52:25,628 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-10 13:52:25,629 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-10 13:52:25,630 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 13:52:25,640 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-10 13:52:25,656 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 13:52:25,657 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 13:52:25,658 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 13:52:25,658 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 13:52:25,659 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 13:52:25,659 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 13:52:25,660 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 13:52:25,660 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 13:52:25,661 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 13:52:25,661 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 13:52:25,662 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 13:52:25,662 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 13:52:25,663 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 13:52:25,663 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-10 13:52:25,663 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 13:52:25,664 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 13:52:25,664 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 13:52:25,664 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 13:52:25,665 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 13:52:25,665 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 13:52:25,665 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 13:52:25,666 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 13:52:25,666 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 13:52:25,666 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 13:52:25,667 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 13:52:25,667 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-10 13:52:25,669 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 13:52:25,670 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-10 13:52:25,671 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-10 13:52:25,672 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-10 13:52:25,673 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-10 13:52:25,673 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-10 13:52:25,681 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-10 13:52:25,683 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-10 13:52:25,685 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-10 13:52:25,685 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-10 13:52:25,686 main DEBUG createAppenders(={Console}) 2024-11-10 13:52:25,686 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-11-10 13:52:25,687 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-10 13:52:25,687 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-11-10 13:52:25,687 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-10 13:52:25,688 main DEBUG OutputStream closed 2024-11-10 13:52:25,688 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-10 13:52:25,688 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-10 13:52:25,688 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-11-10 13:52:25,755 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-10 13:52:25,757 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-10 13:52:25,758 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-10 13:52:25,759 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-10 13:52:25,760 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-10 13:52:25,760 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-10 13:52:25,760 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-10 13:52:25,761 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-10 13:52:25,761 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-10 13:52:25,761 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-10 13:52:25,762 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-10 13:52:25,762 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-10 13:52:25,762 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-10 13:52:25,762 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-10 13:52:25,763 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-10 13:52:25,763 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-10 13:52:25,763 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-10 13:52:25,764 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-10 13:52:25,766 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-10 13:52:25,767 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-11-10 13:52:25,767 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-10 13:52:25,768 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-11-10T13:52:25,782 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-11-10 13:52:25,784 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-10 13:52:25,784 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
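[Editor's note] The DEBUG lines above trace Log4j 2.17.2 loading the test's log4j2.properties from hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar: per-package logger levels, a single "Console" appender writing to SYSTEM_ERR with the %d{ISO8601} %-5p ... pattern, and a root logger at INFO. Purely as orientation, the sketch below builds an equivalent configuration programmatically with Log4j's ConfigurationBuilder; it substitutes the standard Console appender for HBase's HBaseTestAppender and lists only a few of the loggers, so it illustrates the shape of the configuration rather than the file the test actually loads.

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.builder.api.AppenderComponentBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

public class EquivalentLog4jConfigSketch {
  public static void main(String[] args) {
    ConfigurationBuilder<BuiltConfiguration> builder =
        ConfigurationBuilderFactory.newConfigurationBuilder();

    // Console appender to stderr with the pattern seen in the log
    // (the real config uses org.apache.hadoop.hbase.logging.HBaseTestAppender).
    AppenderComponentBuilder console = builder.newAppender("Console", "Console")
        .addAttribute("target", "SYSTEM_ERR")
        .add(builder.newLayout("PatternLayout")
            .addAttribute("pattern", "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n"));
    builder.add(console);

    // A few of the per-package levels enumerated in the DEBUG output above.
    builder.add(builder.newLogger("org.apache.hadoop", Level.WARN));
    builder.add(builder.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
    builder.add(builder.newLogger("org.apache.zookeeper", Level.ERROR));

    // rootLogger = INFO,Console (the levelAndRefs="INFO,Console" line above).
    builder.add(builder.newRootLogger(Level.INFO)
        .add(builder.newAppenderRef("Console")));

    Configurator.initialize(builder.build());
  }
}
```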
2024-11-10T13:52:26,011 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84 2024-11-10T13:52:26,035 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/cluster_c5b26055-ab68-e52f-57e5-6cb60b7810be, deleteOnExit=true 2024-11-10T13:52:26,037 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/test.cache.data in system properties and HBase conf 2024-11-10T13:52:26,037 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/hadoop.tmp.dir in system properties and HBase conf 2024-11-10T13:52:26,038 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/hadoop.log.dir in system properties and HBase conf 2024-11-10T13:52:26,039 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-10T13:52:26,039 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-10T13:52:26,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-10T13:52:26,126 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-10T13:52:26,214 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-10T13:52:26,219 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-10T13:52:26,219 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-10T13:52:26,220 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-10T13:52:26,220 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T13:52:26,221 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-10T13:52:26,221 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-10T13:52:26,222 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T13:52:26,222 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T13:52:26,223 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-10T13:52:26,224 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/nfs.dump.dir in system properties and HBase conf 2024-11-10T13:52:26,224 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/java.io.tmpdir in system properties and HBase conf 2024-11-10T13:52:26,224 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T13:52:26,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-10T13:52:26,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-10T13:52:27,318 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-10T13:52:27,385 INFO [Time-limited test {}] log.Log(170): Logging initialized @2391ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-10T13:52:27,449 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:52:27,505 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:52:27,524 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:52:27,524 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:52:27,526 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T13:52:27,537 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:52:27,540 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@760c69c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:52:27,541 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ce709a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:52:27,717 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62d6efd9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/java.io.tmpdir/jetty-localhost-43097-hadoop-hdfs-3_4_1-tests_jar-_-any-4843785962666207442/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T13:52:27,724 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@353d35a1{HTTP/1.1, (http/1.1)}{localhost:43097} 2024-11-10T13:52:27,725 INFO [Time-limited test {}] server.Server(415): Started @2732ms 2024-11-10T13:52:28,316 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:52:28,326 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:52:28,327 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:52:28,327 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:52:28,327 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T13:52:28,328 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a5de9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:52:28,329 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69893329{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:52:28,425 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1b97a472{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/java.io.tmpdir/jetty-localhost-39597-hadoop-hdfs-3_4_1-tests_jar-_-any-7460413533124933842/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:52:28,425 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@3722a29b{HTTP/1.1, (http/1.1)}{localhost:39597} 2024-11-10T13:52:28,426 INFO [Time-limited test {}] server.Server(415): Started @3433ms 2024-11-10T13:52:28,471 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T13:52:28,573 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:52:28,580 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:52:28,582 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:52:28,582 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:52:28,582 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T13:52:28,583 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@510fec09{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:52:28,583 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40eb7053{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:52:28,692 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@353955e9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/java.io.tmpdir/jetty-localhost-36845-hadoop-hdfs-3_4_1-tests_jar-_-any-15808650836243673396/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:52:28,693 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11738cd8{HTTP/1.1, (http/1.1)}{localhost:36845} 2024-11-10T13:52:28,693 INFO [Time-limited test {}] server.Server(415): Started @3700ms 2024-11-10T13:52:28,696 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T13:52:28,737 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:52:28,743 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:52:28,744 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:52:28,744 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:52:28,745 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T13:52:28,749 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16cd567f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:52:28,749 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5822645a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:52:28,846 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3114ae69{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/java.io.tmpdir/jetty-localhost-40587-hadoop-hdfs-3_4_1-tests_jar-_-any-8072233689308343656/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:52:28,847 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c70a874{HTTP/1.1, (http/1.1)}{localhost:40587} 2024-11-10T13:52:28,847 INFO [Time-limited test {}] server.Server(415): Started @3854ms 2024-11-10T13:52:28,849 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-10T13:52:30,344 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/cluster_c5b26055-ab68-e52f-57e5-6cb60b7810be/data/data1/current/BP-1365254765-172.17.0.3-1731246746770/current, will proceed with Du for space computation calculation, 2024-11-10T13:52:30,344 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/cluster_c5b26055-ab68-e52f-57e5-6cb60b7810be/data/data4/current/BP-1365254765-172.17.0.3-1731246746770/current, will proceed with Du for space computation calculation, 2024-11-10T13:52:30,344 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/cluster_c5b26055-ab68-e52f-57e5-6cb60b7810be/data/data2/current/BP-1365254765-172.17.0.3-1731246746770/current, will proceed with Du for space computation calculation, 2024-11-10T13:52:30,344 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/cluster_c5b26055-ab68-e52f-57e5-6cb60b7810be/data/data3/current/BP-1365254765-172.17.0.3-1731246746770/current, will proceed with Du for space computation calculation, 2024-11-10T13:52:30,375 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T13:52:30,375 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T13:52:30,417 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/cluster_c5b26055-ab68-e52f-57e5-6cb60b7810be/data/data6/current/BP-1365254765-172.17.0.3-1731246746770/current, will proceed with Du for space computation calculation, 2024-11-10T13:52:30,417 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/cluster_c5b26055-ab68-e52f-57e5-6cb60b7810be/data/data5/current/BP-1365254765-172.17.0.3-1731246746770/current, will proceed with Du for space computation calculation, 2024-11-10T13:52:30,420 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2288ae499dd24084 with lease ID 0x53363de43fb9ec7d: Processing first storage report for DS-1ac35774-1f86-4084-b502-e4543384b1ee from datanode DatanodeRegistration(127.0.0.1:35197, datanodeUuid=e21c19b5-e4bd-4d98-bd31-53ea3ca956cb, infoPort=37243, infoSecurePort=0, ipcPort=36231, storageInfo=lv=-57;cid=testClusterID;nsid=482459471;c=1731246746770) 2024-11-10T13:52:30,422 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2288ae499dd24084 with lease ID 0x53363de43fb9ec7d: from storage DS-1ac35774-1f86-4084-b502-e4543384b1ee node DatanodeRegistration(127.0.0.1:35197, datanodeUuid=e21c19b5-e4bd-4d98-bd31-53ea3ca956cb, infoPort=37243, infoSecurePort=0, ipcPort=36231, storageInfo=lv=-57;cid=testClusterID;nsid=482459471;c=1731246746770), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T13:52:30,422 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd1098f2fdd264004 with lease ID 0x53363de43fb9ec7c: Processing first storage report for DS-e12fe57f-d5be-46d5-8491-b3b368da2951 from datanode DatanodeRegistration(127.0.0.1:40371, datanodeUuid=80b895b3-c902-4d4f-91a8-f256e45210a9, infoPort=40043, infoSecurePort=0, ipcPort=46267, storageInfo=lv=-57;cid=testClusterID;nsid=482459471;c=1731246746770) 2024-11-10T13:52:30,423 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd1098f2fdd264004 with lease ID 0x53363de43fb9ec7c: from storage DS-e12fe57f-d5be-46d5-8491-b3b368da2951 node DatanodeRegistration(127.0.0.1:40371, datanodeUuid=80b895b3-c902-4d4f-91a8-f256e45210a9, infoPort=40043, infoSecurePort=0, ipcPort=46267, storageInfo=lv=-57;cid=testClusterID;nsid=482459471;c=1731246746770), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:52:30,423 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2288ae499dd24084 with lease ID 0x53363de43fb9ec7d: Processing first storage report for DS-761dc032-82ca-43c3-ad2a-6a7bb4dac818 from datanode DatanodeRegistration(127.0.0.1:35197, datanodeUuid=e21c19b5-e4bd-4d98-bd31-53ea3ca956cb, infoPort=37243, infoSecurePort=0, ipcPort=36231, storageInfo=lv=-57;cid=testClusterID;nsid=482459471;c=1731246746770) 2024-11-10T13:52:30,423 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2288ae499dd24084 with lease ID 0x53363de43fb9ec7d: from storage DS-761dc032-82ca-43c3-ad2a-6a7bb4dac818 node DatanodeRegistration(127.0.0.1:35197, datanodeUuid=e21c19b5-e4bd-4d98-bd31-53ea3ca956cb, 
infoPort=37243, infoSecurePort=0, ipcPort=36231, storageInfo=lv=-57;cid=testClusterID;nsid=482459471;c=1731246746770), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T13:52:30,423 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd1098f2fdd264004 with lease ID 0x53363de43fb9ec7c: Processing first storage report for DS-4d9d80a2-8150-4520-9b9b-b897ba57e962 from datanode DatanodeRegistration(127.0.0.1:40371, datanodeUuid=80b895b3-c902-4d4f-91a8-f256e45210a9, infoPort=40043, infoSecurePort=0, ipcPort=46267, storageInfo=lv=-57;cid=testClusterID;nsid=482459471;c=1731246746770) 2024-11-10T13:52:30,424 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd1098f2fdd264004 with lease ID 0x53363de43fb9ec7c: from storage DS-4d9d80a2-8150-4520-9b9b-b897ba57e962 node DatanodeRegistration(127.0.0.1:40371, datanodeUuid=80b895b3-c902-4d4f-91a8-f256e45210a9, infoPort=40043, infoSecurePort=0, ipcPort=46267, storageInfo=lv=-57;cid=testClusterID;nsid=482459471;c=1731246746770), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:52:30,435 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T13:52:30,439 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb88b60fc210bf548 with lease ID 0x53363de43fb9ec7e: Processing first storage report for DS-895ba12e-f283-45f3-aa37-983c6b6504b2 from datanode DatanodeRegistration(127.0.0.1:44149, datanodeUuid=4ee6e221-f22e-4dea-9510-ec93742f5c09, infoPort=43429, infoSecurePort=0, ipcPort=46005, storageInfo=lv=-57;cid=testClusterID;nsid=482459471;c=1731246746770) 2024-11-10T13:52:30,439 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb88b60fc210bf548 with lease ID 0x53363de43fb9ec7e: from storage DS-895ba12e-f283-45f3-aa37-983c6b6504b2 node DatanodeRegistration(127.0.0.1:44149, datanodeUuid=4ee6e221-f22e-4dea-9510-ec93742f5c09, infoPort=43429, infoSecurePort=0, ipcPort=46005, storageInfo=lv=-57;cid=testClusterID;nsid=482459471;c=1731246746770), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T13:52:30,439 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb88b60fc210bf548 with lease ID 0x53363de43fb9ec7e: Processing first storage report for DS-ee0495ac-da6e-4c0b-a632-681f4018a5c3 from datanode DatanodeRegistration(127.0.0.1:44149, datanodeUuid=4ee6e221-f22e-4dea-9510-ec93742f5c09, infoPort=43429, infoSecurePort=0, ipcPort=46005, storageInfo=lv=-57;cid=testClusterID;nsid=482459471;c=1731246746770) 2024-11-10T13:52:30,439 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb88b60fc210bf548 with lease ID 0x53363de43fb9ec7e: from storage DS-ee0495ac-da6e-4c0b-a632-681f4018a5c3 node DatanodeRegistration(127.0.0.1:44149, datanodeUuid=4ee6e221-f22e-4dea-9510-ec93742f5c09, infoPort=43429, infoSecurePort=0, ipcPort=46005, storageInfo=lv=-57;cid=testClusterID;nsid=482459471;c=1731246746770), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:52:30,489 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84 2024-11-10T13:52:30,567 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable 2024-11-10T13:52:30,622 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=162, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=64, ProcessCount=11, AvailableMemoryMB=7460 2024-11-10T13:52:30,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-10T13:52:30,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-11-10T13:52:30,692 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/cluster_c5b26055-ab68-e52f-57e5-6cb60b7810be/zookeeper_0, clientPort=62793, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/cluster_c5b26055-ab68-e52f-57e5-6cb60b7810be/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/cluster_c5b26055-ab68-e52f-57e5-6cb60b7810be/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-10T13:52:30,701 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62793 2024-11-10T13:52:30,721 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:30,725 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:30,812 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:30,813 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-10T13:52:30,855 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-180646471_22 at /127.0.0.1:39054 [Receiving block BP-1365254765-172.17.0.3-1731246746770:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:44149:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39054 dst: /127.0.0.1:44149 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:52:30,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44149 is added to blk_-9223372036854775792_1002 (size=7) 2024-11-10T13:52:31,274 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
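[Editor's note] The warnings and the DataXceiver error above are the expected consequence of writing an RS-3-2-1024k erasure-coded block group on a mini-cluster that starts only three datanodes: the policy needs three data plus two parity block locations, so parity indices 3 and 4 cannot be placed and the block group is written "at high risk of losing data". The log itself suggests `hdfs ec -verifyClusterSetup` as the shell-level check. Purely as an illustration (the NameNode port and path are taken from this run's log and are not a stable endpoint), an equivalent check from Java might look like the sketch below.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class VerifyEcPolicySketch {
  public static void main(String[] args) throws Exception {
    // Placeholder location from this log run, for illustration only.
    Path root = new Path("hdfs://localhost:44185/user/jenkins/test-data");

    Configuration conf = new Configuration();
    try (DistributedFileSystem dfs = (DistributedFileSystem) root.getFileSystem(conf)) {
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(root);
      if (policy == null) {
        System.out.println("no erasure coding policy set; plain replication in effect");
        return;
      }
      // RS-3-2-1024k: 3 data units + 2 parity units => at least 5 datanodes for full placement.
      int required = policy.getNumDataUnits() + policy.getNumParityUnits();
      int live = dfs.getDataNodeStats().length; // currently registered datanodes
      System.out.printf("policy=%s needs %d datanodes, cluster has %d live%n",
          policy.getName(), required, live);
    }
  }
}
```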
2024-11-10T13:52:31,287 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7 with version=8 2024-11-10T13:52:31,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/hbase-staging 2024-11-10T13:52:31,370 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-10T13:52:31,630 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2467a7071e00:0 server-side Connection retries=45 2024-11-10T13:52:31,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:31,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:31,644 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:52:31,644 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:31,644 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:52:31,778 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-10T13:52:31,837 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-10T13:52:31,846 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-10T13:52:31,849 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:52:31,874 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 95090 (auto-detected) 2024-11-10T13:52:31,875 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-11-10T13:52:31,893 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:36081 2024-11-10T13:52:31,915 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36081 connecting to ZooKeeper ensemble=127.0.0.1:62793 2024-11-10T13:52:32,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:360810x0, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:52:32,049 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36081-0x1012504f7810000 connected 2024-11-10T13:52:32,137 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:32,140 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:32,151 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:52:32,154 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7, hbase.cluster.distributed=false 2024-11-10T13:52:32,175 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:52:32,179 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36081 2024-11-10T13:52:32,179 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36081 2024-11-10T13:52:32,180 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36081 2024-11-10T13:52:32,183 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36081 2024-11-10T13:52:32,184 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36081 2024-11-10T13:52:32,275 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2467a7071e00:0 server-side Connection retries=45 2024-11-10T13:52:32,276 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:32,277 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:32,277 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:52:32,277 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:32,277 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:52:32,279 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T13:52:32,282 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:52:32,283 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:39575 2024-11-10T13:52:32,285 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39575 connecting to ZooKeeper ensemble=127.0.0.1:62793 2024-11-10T13:52:32,286 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:32,290 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:32,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:395750x0, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:52:32,304 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:395750x0, quorum=127.0.0.1:62793, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:52:32,304 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39575-0x1012504f7810001 connected 2024-11-10T13:52:32,308 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T13:52:32,314 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T13:52:32,317 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39575-0x1012504f7810001, quorum=127.0.0.1:62793, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T13:52:32,322 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39575-0x1012504f7810001, quorum=127.0.0.1:62793, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:52:32,323 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39575 2024-11-10T13:52:32,323 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39575 2024-11-10T13:52:32,323 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39575 2024-11-10T13:52:32,324 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39575 2024-11-10T13:52:32,324 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39575 2024-11-10T13:52:32,341 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2467a7071e00:0 server-side Connection retries=45 2024-11-10T13:52:32,341 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:32,341 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:32,342 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:52:32,342 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:32,342 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:52:32,342 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T13:52:32,343 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:52:32,344 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42563 2024-11-10T13:52:32,345 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42563 connecting to ZooKeeper ensemble=127.0.0.1:62793 2024-11-10T13:52:32,346 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:32,349 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:32,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:425630x0, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:52:32,368 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:425630x0, quorum=127.0.0.1:62793, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:52:32,368 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42563-0x1012504f7810002 connected 2024-11-10T13:52:32,368 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T13:52:32,369 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T13:52:32,370 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42563-0x1012504f7810002, quorum=127.0.0.1:62793, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T13:52:32,372 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42563-0x1012504f7810002, quorum=127.0.0.1:62793, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:52:32,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42563 2024-11-10T13:52:32,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42563 2024-11-10T13:52:32,374 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42563 2024-11-10T13:52:32,374 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42563 2024-11-10T13:52:32,375 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42563 2024-11-10T13:52:32,390 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2467a7071e00:0 server-side Connection retries=45 2024-11-10T13:52:32,390 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:32,390 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:32,390 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:52:32,390 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:32,390 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:52:32,391 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T13:52:32,391 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:52:32,392 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33405 2024-11-10T13:52:32,393 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33405 connecting to ZooKeeper ensemble=127.0.0.1:62793 2024-11-10T13:52:32,394 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:32,397 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:32,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:334050x0, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:52:32,410 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:334050x0, quorum=127.0.0.1:62793, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:52:32,410 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33405-0x1012504f7810003 connected 2024-11-10T13:52:32,411 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T13:52:32,412 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T13:52:32,413 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33405-0x1012504f7810003, quorum=127.0.0.1:62793, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T13:52:32,416 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:33405-0x1012504f7810003, quorum=127.0.0.1:62793, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:52:32,416 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33405 2024-11-10T13:52:32,417 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33405 2024-11-10T13:52:32,421 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33405 2024-11-10T13:52:32,422 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33405 2024-11-10T13:52:32,423 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33405 2024-11-10T13:52:32,439 DEBUG [M:0;2467a7071e00:36081 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2467a7071e00:36081 2024-11-10T13:52:32,440 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2467a7071e00,36081,1731246751482 2024-11-10T13:52:32,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42563-0x1012504f7810002, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:52:32,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x1012504f7810003, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:52:32,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39575-0x1012504f7810001, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:52:32,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:52:32,453 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2467a7071e00,36081,1731246751482 2024-11-10T13:52:32,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39575-0x1012504f7810001, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T13:52:32,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42563-0x1012504f7810002, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T13:52:32,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x1012504f7810003, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T13:52:32,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36081-0x1012504f7810000, 
quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:32,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39575-0x1012504f7810001, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:32,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42563-0x1012504f7810002, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:32,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x1012504f7810003, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:32,492 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T13:52:32,493 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2467a7071e00,36081,1731246751482 from backup master directory 2024-11-10T13:52:32,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39575-0x1012504f7810001, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:52:32,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x1012504f7810003, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:52:32,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2467a7071e00,36081,1731246751482 2024-11-10T13:52:32,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42563-0x1012504f7810002, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:52:32,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:52:32,505 WARN [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
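The ZKUtil and ZKWatcher records above show the pattern this startup relies on: a watch is set on a znode that does not exist yet (/hbase/master, /hbase/running, the backup-masters children) and later fires as a NodeCreated or NodeChildrenChanged event once the active master publishes it. A minimal sketch of the same pattern against the plain Apache ZooKeeper client API follows; the quorum address, session timeout, parent setup and payload are placeholders, not values from this run.

    import java.nio.charset.StandardCharsets;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        CountDownLatch created = new CountDownLatch(1);

        // Placeholder quorum address; HBase would pass its own ensemble here.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();

        // Make sure the parent exists so the child create below can succeed.
        try {
          zk.create("/hbase", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        } catch (KeeperException.NodeExistsException ignore) {
          // parent already present
        }

        // exists() registers a watch even when the znode is absent; it fires
        // later as a NodeCreated event, matching the ZKWatcher records above.
        Watcher masterWatcher = event -> {
          if (event.getType() == Watcher.Event.EventType.NodeCreated
              && "/hbase/master".equals(event.getPath())) {
            created.countDown();
          }
        };
        if (zk.exists("/hbase/master", masterWatcher) == null) {
          System.out.println("/hbase/master not there yet; watch registered");
        }

        // Stand-in for the active master publishing its ephemeral znode.
        zk.create("/hbase/master",
            "2467a7071e00,36081".getBytes(StandardCharsets.UTF_8),
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        created.await();
        System.out.println("NodeCreated received for /hbase/master");
        zk.close();
      }
    }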
2024-11-10T13:52:32,505 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2467a7071e00,36081,1731246751482 2024-11-10T13:52:32,507 INFO [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-10T13:52:32,508 INFO [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-10T13:52:32,569 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/hbase.id] with ID: 13a9d572-6f12-4046-9ea8-2328c6ba1309 2024-11-10T13:52:32,569 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/.tmp/hbase.id 2024-11-10T13:52:32,577 WARN [master/2467a7071e00:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:32,577 WARN [master/2467a7071e00:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:32,580 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-180646471_22 at /127.0.0.1:48356 [Receiving block BP-1365254765-172.17.0.3-1731246746770:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:35197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48356 dst: /127.0.0.1:35197 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
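The DFSStripedOutputStream warnings and the DataXceiver errors above come from writing into a directory whose erasure coding policy, RS-3-2-1024k, needs more datanodes than the mini cluster has available; the log itself suggests checking with 'hdfs ec -verifyClusterSetup'. The sketch below shows the equivalent programmatic check with the HDFS client API; the NameNode URI, directory path and the switch to XOR-2-1-1024k are illustrative assumptions, not taken from this run.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcPolicySketch {
      public static void main(String[] args) throws Exception {
        // Placeholder NameNode URI and directory.
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem)
            FileSystem.get(URI.create("hdfs://localhost:8020"), conf);

        Path dir = new Path("/user/jenkins/test-data");
        ErasureCodingPolicy effective = dfs.getErasureCodingPolicy(dir);
        System.out.println("effective EC policy: "
            + (effective == null ? "replication (no EC policy)" : effective.getName()));

        // RS-3-2-1024k stripes each block group across 3 data + 2 parity
        // blocks, so it needs at least 5 datanodes; with only the handful of
        // datanodes visible in this log the parity blocks cannot be placed,
        // hence the warnings. XOR-2-1-1024k fits on 3 datanodes, but the
        // policy must already be enabled on the cluster
        // (hdfs ec -enablePolicy -policy XOR-2-1-1024k).
        dfs.setErasureCodingPolicy(dir, "XOR-2-1-1024k");
        dfs.close();
      }
    }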
2024-11-10T13:52:32,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-10T13:52:32,588 WARN [master/2467a7071e00:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T13:52:32,588 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/.tmp/hbase.id]:[hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/hbase.id] 2024-11-10T13:52:32,630 INFO [master/2467a7071e00:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:32,634 INFO [master/2467a7071e00:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-10T13:52:32,652 INFO [master/2467a7071e00:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-11-10T13:52:32,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x1012504f7810003, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:32,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42563-0x1012504f7810002, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:32,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39575-0x1012504f7810001, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:32,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:32,674 WARN [master/2467a7071e00:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:32,674 WARN [master/2467a7071e00:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:32,677 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-180646471_22 at /127.0.0.1:48388 [Receiving block BP-1365254765-172.17.0.3-1731246746770:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:35197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48388 dst: /127.0.0.1:35197 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:52:32,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-10T13:52:32,683 WARN [master/2467a7071e00:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T13:52:32,695 INFO [master/2467a7071e00:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T13:52:32,697 INFO [master/2467a7071e00:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T13:52:32,702 INFO [master/2467a7071e00:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T13:52:32,729 WARN [master/2467a7071e00:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, 
policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:32,730 WARN [master/2467a7071e00:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:32,733 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-180646471_22 at /127.0.0.1:51332 [Receiving block BP-1365254765-172.17.0.3-1731246746770:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:40371:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51332 dst: /127.0.0.1:40371 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:52:32,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-10T13:52:32,739 WARN [master/2467a7071e00:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
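The MasterRegion record a few lines back dumps the full descriptor of the local 'master:store' table, including the per-family attributes. For orientation, this is a sketch of how such a descriptor is assembled with the public HBase client API, reproducing only the 'info' family attributes shown in the log (VERSIONS=3, IN_MEMORY=true, BLOOMFILTER=ROWCOL, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192). The table name is hypothetical; the real master:store region is created internally by MasterRegion, not through Admin.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreLikeDescriptorSketch {
      public static void main(String[] args) throws Exception {
        // Mirrors the 'info' family attributes from the logged descriptor.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8192)
            .build();

        // Hypothetical table name, used only to show the builder chain.
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("storeLikeDemo"))
            .setColumnFamily(info)
            .build();

        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createTable(desc);
        }
      }
    }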
2024-11-10T13:52:32,755 INFO [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/data/master/store 2024-11-10T13:52:32,771 WARN [master/2467a7071e00:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:32,771 WARN [master/2467a7071e00:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:32,774 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-180646471_22 at /127.0.0.1:51346 [Receiving block BP-1365254765-172.17.0.3-1731246746770:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40371:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51346 dst: /127.0.0.1:40371 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:52:32,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-10T13:52:32,780 WARN [master/2467a7071e00:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T13:52:32,783 INFO [master/2467a7071e00:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-10T13:52:32,786 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:52:32,787 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T13:52:32,787 INFO [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:52:32,787 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:52:32,789 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T13:52:32,789 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:52:32,789 INFO [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
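The StoreHotnessProtector record above notes it is disabled and that setting hbase.region.store.parallel.put.limit above 0 enables it. A minimal sketch of turning it on through the Configuration that would be handed to the region servers; the limit value of 10 is an arbitrary illustration, not a recommendation from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HotnessProtectorConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Key quoted from the log message; > 0 enables StoreHotnessProtector,
        // which caps concurrent puts per store under heavy write pressure.
        conf.setInt("hbase.region.store.parallel.put.limit", 10);
        System.out.println(
            conf.getInt("hbase.region.store.parallel.put.limit", 0));
      }
    }

In a real deployment this key would live in hbase-site.xml on the region servers rather than be set programmatically.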
2024-11-10T13:52:32,790 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731246752787Disabling compacts and flushes for region at 1731246752787Disabling writes for close at 1731246752789 (+2 ms)Writing region close event to WAL at 1731246752789Closed at 1731246752789 2024-11-10T13:52:32,793 WARN [master/2467a7071e00:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/data/master/store/.initializing 2024-11-10T13:52:32,793 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/WALs/2467a7071e00,36081,1731246751482 2024-11-10T13:52:32,801 INFO [master/2467a7071e00:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-10T13:52:32,815 INFO [master/2467a7071e00:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2467a7071e00%2C36081%2C1731246751482, suffix=, logDir=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/WALs/2467a7071e00,36081,1731246751482, archiveDir=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/oldWALs, maxLogs=10 2024-11-10T13:52:32,847 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/WALs/2467a7071e00,36081,1731246751482/2467a7071e00%2C36081%2C1731246751482.1731246752820, exclude list is [], retry=0 2024-11-10T13:52:32,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:414) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:473) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:468) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:52:32,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35197,DS-1ac35774-1f86-4084-b502-e4543384b1ee,DISK] 2024-11-10T13:52:32,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40371,DS-e12fe57f-d5be-46d5-8491-b3b368da2951,DISK] 2024-11-10T13:52:32,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44149,DS-895ba12e-f283-45f3-aa37-983c6b6504b2,DISK] 2024-11-10T13:52:32,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-10T13:52:32,907 INFO [master/2467a7071e00:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/WALs/2467a7071e00,36081,1731246751482/2467a7071e00%2C36081%2C1731246751482.1731246752820 2024-11-10T13:52:32,907 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40043:40043),(127.0.0.1/127.0.0.1:37243:37243),(127.0.0.1/127.0.0.1:43429:43429)] 2024-11-10T13:52:32,908 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:52:32,908 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:52:32,911 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:32,912 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:32,947 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:32,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-10T13:52:32,972 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:32,975 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:52:32,975 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:32,979 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-10T13:52:32,980 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:32,980 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:52:32,981 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:32,983 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-10T13:52:32,983 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:32,984 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:52:32,985 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:32,987 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-10T13:52:32,987 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:32,988 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:52:32,989 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:32,992 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:32,993 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:32,998 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:32,998 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:33,002 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T13:52:33,006 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:33,013 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:52:33,014 INFO [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69546639, jitterRate=0.03632567822933197}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T13:52:33,021 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731246752922Initializing all the Stores at 1731246752924 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731246752925 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731246752926 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731246752926Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731246752926Cleaning up temporary data from old regions at 1731246752998 (+72 ms)Region opened successfully at 1731246753021 (+23 ms) 2024-11-10T13:52:33,023 INFO [master/2467a7071e00:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-10T13:52:33,055 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b238218, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2467a7071e00/172.17.0.3:0 2024-11-10T13:52:33,082 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-10T13:52:33,091 INFO [master/2467a7071e00:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-10T13:52:33,091 INFO [master/2467a7071e00:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-10T13:52:33,093 INFO [master/2467a7071e00:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-10T13:52:33,095 INFO [master/2467a7071e00:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-10T13:52:33,100 INFO [master/2467a7071e00:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-10T13:52:33,101 INFO [master/2467a7071e00:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-10T13:52:33,126 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-10T13:52:33,136 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-10T13:52:33,187 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-10T13:52:33,191 INFO [master/2467a7071e00:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-10T13:52:33,193 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-10T13:52:33,206 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-10T13:52:33,209 INFO [master/2467a7071e00:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-10T13:52:33,215 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-10T13:52:33,227 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-10T13:52:33,228 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-10T13:52:33,240 DEBUG [master/2467a7071e00:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-10T13:52:33,261 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-10T13:52:33,271 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-10T13:52:33,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42563-0x1012504f7810002, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T13:52:33,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x1012504f7810003, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T13:52:33,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T13:52:33,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39575-0x1012504f7810001, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T13:52:33,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42563-0x1012504f7810002, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:33,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x1012504f7810003, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:33,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:33,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39575-0x1012504f7810001, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:33,287 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2467a7071e00,36081,1731246751482, sessionid=0x1012504f7810000, setting cluster-up flag (Was=false) 2024-11-10T13:52:33,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42563-0x1012504f7810002, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:33,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x1012504f7810003, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:33,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39575-0x1012504f7810001, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
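A few records back, FlushLargeStoresPolicy reported that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the master:store descriptor and that it falls back to the memstore flush size divided by the number of families. A hedged sketch of supplying that bound through a table descriptor follows; the table name, family and 16 MB value are placeholders, and the key itself is quoted from the log message.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        // When this key is present in the table descriptor,
        // FlushLargeStoresPolicy uses it instead of
        // memStoreFlushSize / numberOfFamilies.
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("flushDemo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))
            .build();
        System.out.println(desc.getValue(
            "hbase.hregion.percolumnfamilyflush.size.lower.bound"));
      }
    }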
2024-11-10T13:52:33,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:33,346 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-10T13:52:33,351 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2467a7071e00,36081,1731246751482 2024-11-10T13:52:33,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x1012504f7810003, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:33,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42563-0x1012504f7810002, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:33,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:33,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39575-0x1012504f7810001, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:33,408 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-10T13:52:33,410 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2467a7071e00,36081,1731246751482 2024-11-10T13:52:33,416 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-10T13:52:33,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_-9223372036854775789_1002 (size=7) 2024-11-10T13:52:33,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-10T13:52:33,427 INFO [RS:1;2467a7071e00:42563 {}] regionserver.HRegionServer(746): ClusterId : 13a9d572-6f12-4046-9ea8-2328c6ba1309 2024-11-10T13:52:33,427 INFO [RS:2;2467a7071e00:33405 {}] regionserver.HRegionServer(746): ClusterId : 13a9d572-6f12-4046-9ea8-2328c6ba1309 2024-11-10T13:52:33,427 INFO [RS:0;2467a7071e00:39575 {}] regionserver.HRegionServer(746): ClusterId : 13a9d572-6f12-4046-9ea8-2328c6ba1309 2024-11-10T13:52:33,429 DEBUG [RS:1;2467a7071e00:42563 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T13:52:33,429 DEBUG [RS:2;2467a7071e00:33405 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T13:52:33,429 DEBUG [RS:0;2467a7071e00:39575 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T13:52:33,452 DEBUG [RS:2;2467a7071e00:33405 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T13:52:33,452 DEBUG [RS:1;2467a7071e00:42563 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T13:52:33,452 DEBUG [RS:0;2467a7071e00:39575 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T13:52:33,452 DEBUG [RS:2;2467a7071e00:33405 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T13:52:33,452 DEBUG [RS:1;2467a7071e00:42563 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T13:52:33,452 DEBUG [RS:0;2467a7071e00:39575 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T13:52:33,463 DEBUG [RS:0;2467a7071e00:39575 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T13:52:33,463 DEBUG [RS:2;2467a7071e00:33405 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T13:52:33,463 DEBUG [RS:1;2467a7071e00:42563 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T13:52:33,463 DEBUG [RS:0;2467a7071e00:39575 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31f4f33c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2467a7071e00/172.17.0.3:0 2024-11-10T13:52:33,463 DEBUG [RS:2;2467a7071e00:33405 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ba05d13, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2467a7071e00/172.17.0.3:0 2024-11-10T13:52:33,463 DEBUG [RS:1;2467a7071e00:42563 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f69eaff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2467a7071e00/172.17.0.3:0 2024-11-10T13:52:33,477 DEBUG [RS:0;2467a7071e00:39575 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2467a7071e00:39575 2024-11-10T13:52:33,479 DEBUG [RS:2;2467a7071e00:33405 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;2467a7071e00:33405 2024-11-10T13:52:33,480 INFO [RS:0;2467a7071e00:39575 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T13:52:33,480 INFO [RS:2;2467a7071e00:33405 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T13:52:33,480 INFO [RS:0;2467a7071e00:39575 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T13:52:33,480 INFO [RS:2;2467a7071e00:33405 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T13:52:33,480 DEBUG [RS:0;2467a7071e00:39575 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-10T13:52:33,480 DEBUG [RS:2;2467a7071e00:33405 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-10T13:52:33,482 DEBUG [RS:1;2467a7071e00:42563 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;2467a7071e00:42563 2024-11-10T13:52:33,482 INFO [RS:1;2467a7071e00:42563 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T13:52:33,482 INFO [RS:1;2467a7071e00:42563 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T13:52:33,482 DEBUG [RS:1;2467a7071e00:42563 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-10T13:52:33,483 INFO [RS:0;2467a7071e00:39575 {}] regionserver.HRegionServer(2659): reportForDuty to master=2467a7071e00,36081,1731246751482 with port=39575, startcode=1731246752244 2024-11-10T13:52:33,483 INFO [RS:1;2467a7071e00:42563 {}] regionserver.HRegionServer(2659): reportForDuty to master=2467a7071e00,36081,1731246751482 with port=42563, startcode=1731246752340 2024-11-10T13:52:33,483 INFO [RS:2;2467a7071e00:33405 {}] regionserver.HRegionServer(2659): reportForDuty to master=2467a7071e00,36081,1731246751482 with port=33405, startcode=1731246752389 2024-11-10T13:52:33,495 DEBUG [RS:1;2467a7071e00:42563 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T13:52:33,495 DEBUG [RS:2;2467a7071e00:33405 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T13:52:33,495 DEBUG [RS:0;2467a7071e00:39575 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T13:52:33,501 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-10T13:52:33,513 INFO [master/2467a7071e00:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-10T13:52:33,524 INFO [master/2467a7071e00:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-10T13:52:33,529 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44035, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService
2024-11-10T13:52:33,529 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39883, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-11-10T13:52:33,529 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58763, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService
2024-11-10T13:52:33,532 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2467a7071e00,36081,1731246751482 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-11-10T13:52:33,536 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36081 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-10T13:52:33,539 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2467a7071e00:0, corePoolSize=5, maxPoolSize=5
2024-11-10T13:52:33,540 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2467a7071e00:0, corePoolSize=5, maxPoolSize=5
2024-11-10T13:52:33,540 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2467a7071e00:0, corePoolSize=5, maxPoolSize=5
2024-11-10T13:52:33,540 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2467a7071e00:0, corePoolSize=5, maxPoolSize=5
2024-11-10T13:52:33,540 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2467a7071e00:0, corePoolSize=10, maxPoolSize=10
2024-11-10T13:52:33,540 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2467a7071e00:0, corePoolSize=1, maxPoolSize=1
2024-11-10T13:52:33,540 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2467a7071e00:0, corePoolSize=2, maxPoolSize=2
2024-11-10T13:52:33,540 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2467a7071e00:0, corePoolSize=1, maxPoolSize=1
2024-11-10T13:52:33,542 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36081 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-10T13:52:33,543 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36081 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-10T13:52:33,548 INFO [master/2467a7071e00:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731246783547
2024-11-10T13:52:33,548 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-10T13:52:33,549 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region
2024-11-10T13:52:33,549 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-11-10T13:52:33,551 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-11-10T13:52:33,555 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-11-10T13:52:33,555 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-11-10T13:52:33,555 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-11-10T13:52:33,556 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-11-10T13:52:33,556 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T13:52:33,556 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-11-10T13:52:33,558 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-10T13:52:33,566 DEBUG [RS:1;2467a7071e00:42563 {}] regionserver.HRegionServer(2683): Master is not running yet
2024-11-10T13:52:33,566 DEBUG [RS:2;2467a7071e00:33405 {}] regionserver.HRegionServer(2683): Master is not running yet
2024-11-10T13:52:33,566 DEBUG [RS:0;2467a7071e00:39575 {}] regionserver.HRegionServer(2683): Master is not running yet
2024-11-10T13:52:33,567 WARN [RS:2;2467a7071e00:33405 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying.
2024-11-10T13:52:33,567 WARN [RS:0;2467a7071e00:39575 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying.
2024-11-10T13:52:33,567 WARN [RS:1;2467a7071e00:42563 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying.
2024-11-10T13:52:33,568 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-11-10T13:52:33,569 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-11-10T13:52:33,570 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-10T13:52:33,570 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-10T13:52:33,570 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-11-10T13:52:33,573 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-11-10T13:52:33,574 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-11-10T13:52:33,576 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-180646471_22 at /127.0.0.1:37830 [Receiving block BP-1365254765-172.17.0.3-1731246746770:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:44149:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37830 dst: /127.0.0.1:44149
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T13:52:33,576 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2467a7071e00:0:becomeActiveMaster-HFileCleaner.large.0-1731246753575,5,FailOnTimeoutGroup]
2024-11-10T13:52:33,577 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2467a7071e00:0:becomeActiveMaster-HFileCleaner.small.0-1731246753576,5,FailOnTimeoutGroup]
2024-11-10T13:52:33,577 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-10T13:52:33,577 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-11-10T13:52:33,578 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-11-10T13:52:33,579 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-11-10T13:52:33,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44149 is added to blk_-9223372036854775712_1013 (size=1321)
2024-11-10T13:52:33,585 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-10T13:52:33,586 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321
2024-11-10T13:52:33,587 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7
2024-11-10T13:52:33,594 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-10T13:52:33,594 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-10T13:52:33,600 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-180646471_22 at /127.0.0.1:51388 [Receiving block BP-1365254765-172.17.0.3-1731246746770:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:40371:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51388 dst: /127.0.0.1:40371
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T13:52:33,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_-9223372036854775696_1015 (size=32)
2024-11-10T13:52:33,605 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-10T13:52:33,606 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-10T13:52:33,608 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-10T13:52:33,611 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-10T13:52:33,611 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T13:52:33,612 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-10T13:52:33,612 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-10T13:52:33,615 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3,
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T13:52:33,615 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:33,616 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:52:33,616 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T13:52:33,619 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T13:52:33,619 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:33,620 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:52:33,621 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T13:52:33,623 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T13:52:33,624 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:33,625 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:52:33,625 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T13:52:33,627 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740 2024-11-10T13:52:33,628 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740 2024-11-10T13:52:33,630 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T13:52:33,631 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T13:52:33,632 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T13:52:33,634 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T13:52:33,640 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:52:33,641 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61890120, jitterRate=-0.07776534557342529}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T13:52:33,645 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731246753606Initializing all the Stores at 1731246753608 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731246753608Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731246753608Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731246753608Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731246753608Cleaning up temporary data from old regions at 1731246753631 (+23 ms)Region opened successfully at 1731246753645 (+14 ms) 2024-11-10T13:52:33,645 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T13:52:33,645 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T13:52:33,645 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T13:52:33,645 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T13:52:33,645 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T13:52:33,647 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T13:52:33,647 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731246753645Disabling compacts and flushes for region at 1731246753645Disabling writes for close at 1731246753645Writing region close event to WAL at 1731246753646 (+1 ms)Closed at 1731246753647 (+1 ms) 2024-11-10T13:52:33,650 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:52:33,650 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-10T13:52:33,656 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-10T13:52:33,666 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T13:52:33,668 INFO [RS:2;2467a7071e00:33405 {}] regionserver.HRegionServer(2659): reportForDuty to master=2467a7071e00,36081,1731246751482 with port=33405, startcode=1731246752389 2024-11-10T13:52:33,668 INFO [RS:0;2467a7071e00:39575 {}] regionserver.HRegionServer(2659): reportForDuty to master=2467a7071e00,36081,1731246751482 with port=39575, startcode=1731246752244 2024-11-10T13:52:33,668 INFO [RS:1;2467a7071e00:42563 {}] regionserver.HRegionServer(2659): reportForDuty to master=2467a7071e00,36081,1731246751482 with port=42563, startcode=1731246752340 2024-11-10T13:52:33,670 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-10T13:52:33,670 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36081 {}] master.ServerManager(363): 
Checking decommissioned status of RegionServer 2467a7071e00,42563,1731246752340 2024-11-10T13:52:33,672 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36081 {}] master.ServerManager(517): Registering regionserver=2467a7071e00,42563,1731246752340 2024-11-10T13:52:33,679 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36081 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2467a7071e00,33405,1731246752389 2024-11-10T13:52:33,679 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36081 {}] master.ServerManager(517): Registering regionserver=2467a7071e00,33405,1731246752389 2024-11-10T13:52:33,679 DEBUG [RS:1;2467a7071e00:42563 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7 2024-11-10T13:52:33,679 DEBUG [RS:1;2467a7071e00:42563 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44185 2024-11-10T13:52:33,679 DEBUG [RS:1;2467a7071e00:42563 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T13:52:33,681 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36081 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2467a7071e00,39575,1731246752244 2024-11-10T13:52:33,681 DEBUG [RS:2;2467a7071e00:33405 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7 2024-11-10T13:52:33,681 DEBUG [RS:2;2467a7071e00:33405 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44185 2024-11-10T13:52:33,682 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36081 {}] master.ServerManager(517): Registering regionserver=2467a7071e00,39575,1731246752244 2024-11-10T13:52:33,682 DEBUG [RS:2;2467a7071e00:33405 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T13:52:33,684 DEBUG [RS:0;2467a7071e00:39575 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7 2024-11-10T13:52:33,684 DEBUG [RS:0;2467a7071e00:39575 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44185 2024-11-10T13:52:33,684 DEBUG [RS:0;2467a7071e00:39575 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T13:52:33,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:52:33,733 DEBUG [RS:1;2467a7071e00:42563 {}] zookeeper.ZKUtil(111): regionserver:42563-0x1012504f7810002, quorum=127.0.0.1:62793, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2467a7071e00,42563,1731246752340 2024-11-10T13:52:33,733 WARN [RS:1;2467a7071e00:42563 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-10T13:52:33,734 DEBUG [RS:2;2467a7071e00:33405 {}] zookeeper.ZKUtil(111): regionserver:33405-0x1012504f7810003, quorum=127.0.0.1:62793, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2467a7071e00,33405,1731246752389 2024-11-10T13:52:33,734 INFO [RS:1;2467a7071e00:42563 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T13:52:33,734 WARN [RS:2;2467a7071e00:33405 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T13:52:33,734 DEBUG [RS:0;2467a7071e00:39575 {}] zookeeper.ZKUtil(111): regionserver:39575-0x1012504f7810001, quorum=127.0.0.1:62793, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2467a7071e00,39575,1731246752244 2024-11-10T13:52:33,734 INFO [RS:2;2467a7071e00:33405 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T13:52:33,734 WARN [RS:0;2467a7071e00:39575 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T13:52:33,734 DEBUG [RS:1;2467a7071e00:42563 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/WALs/2467a7071e00,42563,1731246752340 2024-11-10T13:52:33,735 DEBUG [RS:2;2467a7071e00:33405 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/WALs/2467a7071e00,33405,1731246752389 2024-11-10T13:52:33,735 INFO [RS:0;2467a7071e00:39575 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T13:52:33,735 DEBUG [RS:0;2467a7071e00:39575 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/WALs/2467a7071e00,39575,1731246752244 2024-11-10T13:52:33,736 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2467a7071e00,33405,1731246752389] 2024-11-10T13:52:33,736 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2467a7071e00,42563,1731246752340] 2024-11-10T13:52:33,737 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2467a7071e00,39575,1731246752244] 2024-11-10T13:52:33,760 INFO [RS:1;2467a7071e00:42563 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T13:52:33,760 INFO [RS:0;2467a7071e00:39575 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T13:52:33,760 INFO [RS:2;2467a7071e00:33405 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T13:52:33,777 INFO [RS:0;2467a7071e00:39575 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T13:52:33,777 INFO [RS:2;2467a7071e00:33405 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T13:52:33,777 INFO [RS:1;2467a7071e00:42563 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T13:52:33,783 INFO 
[RS:0;2467a7071e00:39575 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T13:52:33,783 INFO [RS:2;2467a7071e00:33405 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T13:52:33,783 INFO [RS:1;2467a7071e00:42563 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T13:52:33,783 INFO [RS:0;2467a7071e00:39575 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,783 INFO [RS:2;2467a7071e00:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,783 INFO [RS:1;2467a7071e00:42563 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,784 INFO [RS:2;2467a7071e00:33405 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T13:52:33,785 INFO [RS:1;2467a7071e00:42563 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T13:52:33,787 INFO [RS:0;2467a7071e00:39575 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T13:52:33,789 INFO [RS:2;2467a7071e00:33405 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T13:52:33,789 INFO [RS:0;2467a7071e00:39575 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T13:52:33,789 INFO [RS:1;2467a7071e00:42563 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T13:52:33,791 INFO [RS:0;2467a7071e00:39575 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,791 INFO [RS:2;2467a7071e00:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,791 INFO [RS:1;2467a7071e00:42563 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-10T13:52:33,791 DEBUG [RS:1;2467a7071e00:42563 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,791 DEBUG [RS:0;2467a7071e00:39575 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,791 DEBUG [RS:0;2467a7071e00:39575 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,791 DEBUG [RS:1;2467a7071e00:42563 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,791 DEBUG [RS:0;2467a7071e00:39575 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,791 DEBUG [RS:1;2467a7071e00:42563 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,791 DEBUG [RS:0;2467a7071e00:39575 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,791 DEBUG [RS:1;2467a7071e00:42563 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,792 DEBUG [RS:0;2467a7071e00:39575 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,792 DEBUG [RS:1;2467a7071e00:42563 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,792 DEBUG [RS:0;2467a7071e00:39575 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2467a7071e00:0, corePoolSize=2, maxPoolSize=2 2024-11-10T13:52:33,792 DEBUG [RS:1;2467a7071e00:42563 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2467a7071e00:0, corePoolSize=2, maxPoolSize=2 2024-11-10T13:52:33,792 DEBUG [RS:0;2467a7071e00:39575 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,792 DEBUG [RS:1;2467a7071e00:42563 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,792 DEBUG [RS:2;2467a7071e00:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,792 DEBUG [RS:1;2467a7071e00:42563 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,792 DEBUG [RS:2;2467a7071e00:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,792 DEBUG [RS:0;2467a7071e00:39575 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2467a7071e00:0, corePoolSize=1, 
maxPoolSize=1 2024-11-10T13:52:33,792 DEBUG [RS:2;2467a7071e00:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,792 DEBUG [RS:0;2467a7071e00:39575 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,792 DEBUG [RS:1;2467a7071e00:42563 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,792 DEBUG [RS:2;2467a7071e00:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,792 DEBUG [RS:0;2467a7071e00:39575 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,792 DEBUG [RS:2;2467a7071e00:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,792 DEBUG [RS:1;2467a7071e00:42563 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,792 DEBUG [RS:2;2467a7071e00:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2467a7071e00:0, corePoolSize=2, maxPoolSize=2 2024-11-10T13:52:33,792 DEBUG [RS:0;2467a7071e00:39575 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,792 DEBUG [RS:1;2467a7071e00:42563 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,793 DEBUG [RS:2;2467a7071e00:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,793 DEBUG [RS:0;2467a7071e00:39575 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,793 DEBUG [RS:1;2467a7071e00:42563 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,793 DEBUG [RS:2;2467a7071e00:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,793 DEBUG [RS:2;2467a7071e00:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,793 DEBUG [RS:0;2467a7071e00:39575 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2467a7071e00:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:52:33,793 DEBUG [RS:1;2467a7071e00:42563 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2467a7071e00:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:52:33,793 DEBUG [RS:2;2467a7071e00:33405 {}] executor.ExecutorService(95): Starting executor 
service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,793 DEBUG [RS:0;2467a7071e00:39575 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:52:33,793 DEBUG [RS:1;2467a7071e00:42563 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:52:33,793 DEBUG [RS:2;2467a7071e00:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,793 DEBUG [RS:2;2467a7071e00:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:33,793 DEBUG [RS:2;2467a7071e00:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2467a7071e00:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:52:33,793 DEBUG [RS:2;2467a7071e00:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:52:33,796 INFO [RS:1;2467a7071e00:42563 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,796 INFO [RS:1;2467a7071e00:42563 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,796 INFO [RS:1;2467a7071e00:42563 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,796 INFO [RS:1;2467a7071e00:42563 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,796 INFO [RS:1;2467a7071e00:42563 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,796 INFO [RS:0;2467a7071e00:39575 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,796 INFO [RS:1;2467a7071e00:42563 {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,42563,1731246752340-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T13:52:33,796 INFO [RS:0;2467a7071e00:39575 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,796 INFO [RS:0;2467a7071e00:39575 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,796 INFO [RS:0;2467a7071e00:39575 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,796 INFO [RS:0;2467a7071e00:39575 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,797 INFO [RS:0;2467a7071e00:39575 {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,39575,1731246752244-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-10T13:52:33,803 INFO [RS:2;2467a7071e00:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,803 INFO [RS:2;2467a7071e00:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,803 INFO [RS:2;2467a7071e00:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,803 INFO [RS:2;2467a7071e00:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,803 INFO [RS:2;2467a7071e00:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,803 INFO [RS:2;2467a7071e00:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,33405,1731246752389-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T13:52:33,821 WARN [2467a7071e00:36081 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-10T13:52:33,821 INFO [RS:1;2467a7071e00:42563 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T13:52:33,821 INFO [RS:2;2467a7071e00:33405 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T13:52:33,822 INFO [RS:0;2467a7071e00:39575 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T13:52:33,823 INFO [RS:1;2467a7071e00:42563 {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,42563,1731246752340-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,823 INFO [RS:0;2467a7071e00:39575 {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,39575,1731246752244-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,823 INFO [RS:2;2467a7071e00:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,33405,1731246752389-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,823 INFO [RS:1;2467a7071e00:42563 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,823 INFO [RS:0;2467a7071e00:39575 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,823 INFO [RS:2;2467a7071e00:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,824 INFO [RS:1;2467a7071e00:42563 {}] regionserver.Replication(171): 2467a7071e00,42563,1731246752340 started 2024-11-10T13:52:33,824 INFO [RS:0;2467a7071e00:39575 {}] regionserver.Replication(171): 2467a7071e00,39575,1731246752244 started 2024-11-10T13:52:33,824 INFO [RS:2;2467a7071e00:33405 {}] regionserver.Replication(171): 2467a7071e00,33405,1731246752389 started 2024-11-10T13:52:33,840 INFO [RS:2;2467a7071e00:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,840 INFO [RS:1;2467a7071e00:42563 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
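Each "Chore ScheduledChore name=..., period=..., unit=..." entry above registers a periodic background task with the region server's ChoreService. A minimal sketch of that pattern, assuming ScheduledChore's (name, stopper, period-in-milliseconds) constructor and ChoreService.scheduleChore behave as their names suggest; the HeartbeatChore here is invented purely for illustration:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
    /** A trivial chore firing once per second, like CompactionChecker/MemstoreFlusherChore above. */
    static class HeartbeatChore extends ScheduledChore {
        HeartbeatChore(Stoppable stopper) {
            super("HeartbeatChore", stopper, 1000); // name, stopper, period in ms
        }

        @Override
        protected void chore() {
            System.out.println("chore tick");
        }
    }

    public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
            private volatile boolean stopped;
            @Override public void stop(String why) { stopped = true; }
            @Override public boolean isStopped() { return stopped; }
        };

        ChoreService choreService = new ChoreService("demo");
        choreService.scheduleChore(new HeartbeatChore(stopper));

        Thread.sleep(3000); // let a few ticks fire
        choreService.shutdown();
    }
}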
2024-11-10T13:52:33,840 INFO [RS:2;2467a7071e00:33405 {}] regionserver.HRegionServer(1482): Serving as 2467a7071e00,33405,1731246752389, RpcServer on 2467a7071e00/172.17.0.3:33405, sessionid=0x1012504f7810003 2024-11-10T13:52:33,841 INFO [RS:1;2467a7071e00:42563 {}] regionserver.HRegionServer(1482): Serving as 2467a7071e00,42563,1731246752340, RpcServer on 2467a7071e00/172.17.0.3:42563, sessionid=0x1012504f7810002 2024-11-10T13:52:33,841 DEBUG [RS:2;2467a7071e00:33405 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T13:52:33,841 DEBUG [RS:1;2467a7071e00:42563 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T13:52:33,841 DEBUG [RS:2;2467a7071e00:33405 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2467a7071e00,33405,1731246752389 2024-11-10T13:52:33,841 DEBUG [RS:1;2467a7071e00:42563 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2467a7071e00,42563,1731246752340 2024-11-10T13:52:33,841 DEBUG [RS:1;2467a7071e00:42563 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2467a7071e00,42563,1731246752340' 2024-11-10T13:52:33,841 DEBUG [RS:2;2467a7071e00:33405 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2467a7071e00,33405,1731246752389' 2024-11-10T13:52:33,842 DEBUG [RS:1;2467a7071e00:42563 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T13:52:33,842 DEBUG [RS:2;2467a7071e00:33405 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T13:52:33,842 DEBUG [RS:1;2467a7071e00:42563 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T13:52:33,842 DEBUG [RS:2;2467a7071e00:33405 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T13:52:33,843 DEBUG [RS:1;2467a7071e00:42563 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T13:52:33,843 DEBUG [RS:2;2467a7071e00:33405 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T13:52:33,843 DEBUG [RS:1;2467a7071e00:42563 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T13:52:33,843 DEBUG [RS:2;2467a7071e00:33405 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T13:52:33,843 DEBUG [RS:1;2467a7071e00:42563 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2467a7071e00,42563,1731246752340 2024-11-10T13:52:33,843 DEBUG [RS:2;2467a7071e00:33405 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2467a7071e00,33405,1731246752389 2024-11-10T13:52:33,843 DEBUG [RS:1;2467a7071e00:42563 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2467a7071e00,42563,1731246752340' 2024-11-10T13:52:33,843 DEBUG [RS:2;2467a7071e00:33405 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2467a7071e00,33405,1731246752389' 2024-11-10T13:52:33,843 DEBUG [RS:1;2467a7071e00:42563 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T13:52:33,843 DEBUG [RS:2;2467a7071e00:33405 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T13:52:33,843 INFO [RS:0;2467a7071e00:39575 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:33,844 INFO [RS:0;2467a7071e00:39575 {}] regionserver.HRegionServer(1482): Serving as 2467a7071e00,39575,1731246752244, RpcServer on 2467a7071e00/172.17.0.3:39575, sessionid=0x1012504f7810001 2024-11-10T13:52:33,844 DEBUG [RS:0;2467a7071e00:39575 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T13:52:33,844 DEBUG [RS:1;2467a7071e00:42563 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T13:52:33,844 DEBUG [RS:2;2467a7071e00:33405 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T13:52:33,844 DEBUG [RS:0;2467a7071e00:39575 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2467a7071e00,39575,1731246752244 2024-11-10T13:52:33,844 DEBUG [RS:0;2467a7071e00:39575 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2467a7071e00,39575,1731246752244' 2024-11-10T13:52:33,844 DEBUG [RS:0;2467a7071e00:39575 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T13:52:33,844 DEBUG [RS:2;2467a7071e00:33405 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T13:52:33,844 INFO [RS:2;2467a7071e00:33405 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T13:52:33,844 DEBUG [RS:1;2467a7071e00:42563 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T13:52:33,845 DEBUG [RS:0;2467a7071e00:39575 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T13:52:33,845 INFO [RS:1;2467a7071e00:42563 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T13:52:33,845 INFO [RS:2;2467a7071e00:33405 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T13:52:33,845 INFO [RS:1;2467a7071e00:42563 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
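The procedure members above bootstrap by checking an "abort" znode and then watching an "acquired" znode for newly posted procedures (paths such as /hbase/flush-table-proc/abort and /hbase/flush-table-proc/acquired). A simplified sketch of that check-then-watch pattern with the plain ZooKeeper client, using the quorum address from the log; error handling and session management are omitted, and this is not the actual ZKProcedureMemberRpcs code:

import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ProcedureZnodeWatchSketch {
    public static void main(String[] args) throws Exception {
        // Quorum address mirrors the log's 127.0.0.1:62793; adjust for a real cluster.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62793", 30000,
                event -> System.out.println("ZK event: " + event.getType() + " on " + event.getPath()));

        String abortZnode = "/hbase/flush-table-proc/abort";
        String acquiredZnode = "/hbase/flush-table-proc/acquired";

        // 1. One-off check for already-aborted procedures (no watch set).
        List<String> aborted = zk.getChildren(abortZnode, false);
        System.out.println("Aborted procedures: " + aborted);

        // 2. Watch the acquired znode so NodeChildrenChanged fires when a new procedure is posted.
        List<String> pending = zk.getChildren(acquiredZnode, true);
        System.out.println("Pending procedures: " + pending);

        Thread.sleep(10000); // keep the session alive long enough to observe events
        zk.close();
    }
}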
2024-11-10T13:52:33,845 DEBUG [RS:0;2467a7071e00:39575 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T13:52:33,845 DEBUG [RS:0;2467a7071e00:39575 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T13:52:33,845 DEBUG [RS:0;2467a7071e00:39575 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2467a7071e00,39575,1731246752244 2024-11-10T13:52:33,845 DEBUG [RS:0;2467a7071e00:39575 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2467a7071e00,39575,1731246752244' 2024-11-10T13:52:33,845 DEBUG [RS:0;2467a7071e00:39575 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T13:52:33,846 DEBUG [RS:0;2467a7071e00:39575 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T13:52:33,846 DEBUG [RS:0;2467a7071e00:39575 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T13:52:33,846 INFO [RS:0;2467a7071e00:39575 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T13:52:33,846 INFO [RS:0;2467a7071e00:39575 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T13:52:33,958 INFO [RS:0;2467a7071e00:39575 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-10T13:52:33,958 INFO [RS:2;2467a7071e00:33405 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-10T13:52:33,958 INFO [RS:1;2467a7071e00:42563 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-10T13:52:33,961 INFO [RS:2;2467a7071e00:33405 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2467a7071e00%2C33405%2C1731246752389, suffix=, logDir=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/WALs/2467a7071e00,33405,1731246752389, archiveDir=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/oldWALs, maxLogs=32 2024-11-10T13:52:33,961 INFO [RS:1;2467a7071e00:42563 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2467a7071e00%2C42563%2C1731246752340, suffix=, logDir=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/WALs/2467a7071e00,42563,1731246752340, archiveDir=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/oldWALs, maxLogs=32 2024-11-10T13:52:33,961 INFO [RS:0;2467a7071e00:39575 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2467a7071e00%2C39575%2C1731246752244, suffix=, logDir=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/WALs/2467a7071e00,39575,1731246752244, archiveDir=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/oldWALs, maxLogs=32 2024-11-10T13:52:33,977 DEBUG [RS:2;2467a7071e00:33405 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/WALs/2467a7071e00,33405,1731246752389/2467a7071e00%2C33405%2C1731246752389.1731246753965, exclude list is [], retry=0 2024-11-10T13:52:33,977 DEBUG [RS:1;2467a7071e00:42563 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for 
/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/WALs/2467a7071e00,42563,1731246752340/2467a7071e00%2C42563%2C1731246752340.1731246753965, exclude list is [], retry=0 2024-11-10T13:52:33,979 DEBUG [RS:0;2467a7071e00:39575 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/WALs/2467a7071e00,39575,1731246752244/2467a7071e00%2C39575%2C1731246752244.1731246753965, exclude list is [], retry=0 2024-11-10T13:52:33,982 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35197,DS-1ac35774-1f86-4084-b502-e4543384b1ee,DISK] 2024-11-10T13:52:33,982 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44149,DS-895ba12e-f283-45f3-aa37-983c6b6504b2,DISK] 2024-11-10T13:52:33,982 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40371,DS-e12fe57f-d5be-46d5-8491-b3b368da2951,DISK] 2024-11-10T13:52:33,982 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40371,DS-e12fe57f-d5be-46d5-8491-b3b368da2951,DISK] 2024-11-10T13:52:33,982 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44149,DS-895ba12e-f283-45f3-aa37-983c6b6504b2,DISK] 2024-11-10T13:52:33,983 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35197,DS-1ac35774-1f86-4084-b502-e4543384b1ee,DISK] 2024-11-10T13:52:34,023 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40371,DS-e12fe57f-d5be-46d5-8491-b3b368da2951,DISK] 2024-11-10T13:52:34,023 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44149,DS-895ba12e-f283-45f3-aa37-983c6b6504b2,DISK] 2024-11-10T13:52:34,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35197,DS-1ac35774-1f86-4084-b502-e4543384b1ee,DISK] 2024-11-10T13:52:34,028 INFO [RS:2;2467a7071e00:33405 {}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/WALs/2467a7071e00,33405,1731246752389/2467a7071e00%2C33405%2C1731246752389.1731246753965 2024-11-10T13:52:34,029 DEBUG [RS:2;2467a7071e00:33405 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43429:43429),(127.0.0.1/127.0.0.1:40043:40043),(127.0.0.1/127.0.0.1:37243:37243)] 2024-11-10T13:52:34,030 INFO [RS:0;2467a7071e00:39575 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/WALs/2467a7071e00,39575,1731246752244/2467a7071e00%2C39575%2C1731246752244.1731246753965 2024-11-10T13:52:34,031 DEBUG [RS:0;2467a7071e00:39575 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40043:40043),(127.0.0.1/127.0.0.1:37243:37243),(127.0.0.1/127.0.0.1:43429:43429)] 2024-11-10T13:52:34,032 INFO [RS:1;2467a7071e00:42563 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/WALs/2467a7071e00,42563,1731246752340/2467a7071e00%2C42563%2C1731246752340.1731246753965 2024-11-10T13:52:34,032 DEBUG [RS:1;2467a7071e00:42563 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37243:37243),(127.0.0.1/127.0.0.1:43429:43429),(127.0.0.1/127.0.0.1:40043:40043)] 2024-11-10T13:52:34,073 DEBUG [2467a7071e00:36081 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-10T13:52:34,082 DEBUG [2467a7071e00:36081 {}] balancer.BalancerClusterState(204): Hosts are {2467a7071e00=0} racks are {/default-rack=0} 2024-11-10T13:52:34,089 DEBUG [2467a7071e00:36081 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-10T13:52:34,089 DEBUG [2467a7071e00:36081 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-10T13:52:34,089 DEBUG [2467a7071e00:36081 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-10T13:52:34,089 DEBUG [2467a7071e00:36081 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-10T13:52:34,089 DEBUG [2467a7071e00:36081 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-10T13:52:34,089 DEBUG [2467a7071e00:36081 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-10T13:52:34,089 INFO [2467a7071e00:36081 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-10T13:52:34,089 INFO [2467a7071e00:36081 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-10T13:52:34,089 INFO [2467a7071e00:36081 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-10T13:52:34,089 DEBUG [2467a7071e00:36081 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-10T13:52:34,095 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2467a7071e00,42563,1731246752340 2024-11-10T13:52:34,102 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2467a7071e00,42563,1731246752340, state=OPENING 2024-11-10T13:52:34,156 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-10T13:52:34,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42563-0x1012504f7810002, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-10T13:52:34,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x1012504f7810003, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:34,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39575-0x1012504f7810001, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:34,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:34,167 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:52:34,167 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:52:34,167 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:52:34,167 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:52:34,169 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T13:52:34,170 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2467a7071e00,42563,1731246752340}] 2024-11-10T13:52:34,349 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T13:52:34,352 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53637, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T13:52:34,363 INFO [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T13:52:34,363 INFO [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T13:52:34,364 INFO [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-10T13:52:34,367 INFO [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2467a7071e00%2C42563%2C1731246752340.meta, suffix=.meta, logDir=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/WALs/2467a7071e00,42563,1731246752340, archiveDir=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/oldWALs, maxLogs=32 2024-11-10T13:52:34,381 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create 
output stream for /user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/WALs/2467a7071e00,42563,1731246752340/2467a7071e00%2C42563%2C1731246752340.meta.1731246754369.meta, exclude list is [], retry=0 2024-11-10T13:52:34,385 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40371,DS-e12fe57f-d5be-46d5-8491-b3b368da2951,DISK] 2024-11-10T13:52:34,385 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44149,DS-895ba12e-f283-45f3-aa37-983c6b6504b2,DISK] 2024-11-10T13:52:34,385 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35197,DS-1ac35774-1f86-4084-b502-e4543384b1ee,DISK] 2024-11-10T13:52:34,388 INFO [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/WALs/2467a7071e00,42563,1731246752340/2467a7071e00%2C42563%2C1731246752340.meta.1731246754369.meta 2024-11-10T13:52:34,389 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40043:40043),(127.0.0.1/127.0.0.1:43429:43429),(127.0.0.1/127.0.0.1:37243:37243)] 2024-11-10T13:52:34,389 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:52:34,390 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T13:52:34,393 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T13:52:34,397 INFO [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
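Both the region server WALs and the meta WAL above are created through AsyncFSWALProvider ("Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider"). A minimal sketch of how that provider is usually selected, assuming the hbase.wal.provider configuration key with its "asyncfs" value; everything else here is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "asyncfs" selects the fan-out async DFS writer seen in the log;
        // "filesystem" would fall back to the classic FSHLog implementation.
        conf.set("hbase.wal.provider", "asyncfs");
        System.out.println("WAL provider: " + conf.get("hbase.wal.provider"));
    }
}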
2024-11-10T13:52:34,401 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T13:52:34,401 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:52:34,402 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T13:52:34,402 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T13:52:34,405 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T13:52:34,406 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T13:52:34,407 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:34,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:52:34,408 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T13:52:34,409 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T13:52:34,409 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:34,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:52:34,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T13:52:34,411 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T13:52:34,411 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:34,412 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:52:34,413 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T13:52:34,414 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T13:52:34,414 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:34,415 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-10T13:52:34,415 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T13:52:34,416 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740 2024-11-10T13:52:34,419 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740 2024-11-10T13:52:34,422 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T13:52:34,422 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T13:52:34,423 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T13:52:34,426 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T13:52:34,428 INFO [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64702920, jitterRate=-0.035851359367370605}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T13:52:34,428 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T13:52:34,430 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731246754402Writing region info on filesystem at 1731246754402Initializing all the Stores at 1731246754404 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731246754405 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731246754405Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731246754405Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731246754405Cleaning up temporary data from old regions at 1731246754422 (+17 ms)Running coprocessor post-open hooks at 1731246754428 (+6 ms)Region opened successfully at 1731246754430 (+2 ms) 2024-11-10T13:52:34,438 INFO [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731246754341 2024-11-10T13:52:34,450 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T13:52:34,451 INFO [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T13:52:34,452 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=2467a7071e00,42563,1731246752340 2024-11-10T13:52:34,455 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2467a7071e00,42563,1731246752340, state=OPEN 2024-11-10T13:52:34,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x1012504f7810003, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T13:52:34,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39575-0x1012504f7810001, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T13:52:34,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T13:52:34,504 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:52:34,504 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:52:34,504 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:52:34,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42563-0x1012504f7810002, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T13:52:34,504 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:52:34,504 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, 
state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2467a7071e00,42563,1731246752340 2024-11-10T13:52:34,513 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T13:52:34,514 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2467a7071e00,42563,1731246752340 in 334 msec 2024-11-10T13:52:34,522 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T13:52:34,522 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 860 msec 2024-11-10T13:52:34,523 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:52:34,523 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T13:52:34,541 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T13:52:34,542 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2467a7071e00,42563,1731246752340, seqNum=-1] 2024-11-10T13:52:34,587 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T13:52:34,589 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60481, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T13:52:34,618 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1650 sec 2024-11-10T13:52:34,618 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731246754618, completionTime=-1 2024-11-10T13:52:34,621 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-10T13:52:34,622 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
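The "Start fetching meta region location from registry" and "The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=...]" entries above are a client resolving which region server currently hosts hbase:meta. A hedged client-side sketch of the same lookup; the ZooKeeper quorum setting and the empty row key are assumptions for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLocationSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1"); // mini-cluster style local ZK
        try (Connection connection = ConnectionFactory.createConnection(conf);
             RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
            // Locate the hbase:meta region that would serve the first/empty row key.
            HRegionLocation location = locator.getRegionLocation(Bytes.toBytes(""));
            System.out.println("hbase:meta is served by " + location.getServerName());
        }
    }
}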
2024-11-10T13:52:34,646 INFO [master/2467a7071e00:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-10T13:52:34,646 INFO [master/2467a7071e00:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731246814646 2024-11-10T13:52:34,646 INFO [master/2467a7071e00:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731246874646 2024-11-10T13:52:34,647 INFO [master/2467a7071e00:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 24 msec 2024-11-10T13:52:34,648 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-10T13:52:34,656 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,36081,1731246751482-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:34,656 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,36081,1731246751482-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:34,657 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,36081,1731246751482-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:34,658 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2467a7071e00:36081, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:34,658 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:34,659 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:34,664 DEBUG [master/2467a7071e00:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T13:52:34,689 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.184sec 2024-11-10T13:52:34,691 INFO [master/2467a7071e00:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T13:52:34,692 INFO [master/2467a7071e00:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T13:52:34,693 INFO [master/2467a7071e00:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T13:52:34,694 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-10T13:52:34,694 INFO [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T13:52:34,695 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,36081,1731246751482-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T13:52:34,695 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,36081,1731246751482-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T13:52:34,700 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T13:52:34,701 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T13:52:34,701 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,36081,1731246751482-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:34,736 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59ec2003, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:52:34,740 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-10T13:52:34,740 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-10T13:52:34,743 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2467a7071e00,36081,-1 for getting cluster id 2024-11-10T13:52:34,745 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T13:52:34,753 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '13a9d572-6f12-4046-9ea8-2328c6ba1309' 2024-11-10T13:52:34,755 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T13:52:34,755 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "13a9d572-6f12-4046-9ea8-2328c6ba1309" 2024-11-10T13:52:34,757 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a0dec0d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:52:34,757 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2467a7071e00,36081,-1] 2024-11-10T13:52:34,760 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T13:52:34,761 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:52:34,762 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42092, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-11-10T13:52:34,765 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bbbb516, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:52:34,766 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T13:52:34,772 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2467a7071e00,42563,1731246752340, seqNum=-1] 2024-11-10T13:52:34,773 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T13:52:34,775 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56816, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T13:52:34,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2467a7071e00,36081,1731246751482 2024-11-10T13:52:34,799 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-10T13:52:34,803 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 2467a7071e00,36081,1731246751482 2024-11-10T13:52:34,805 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@f958a0d 2024-11-10T13:52:34,806 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-10T13:52:34,808 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42098, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-10T13:52:34,814 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36081 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T13:52:34,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36081 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-10T13:52:34,824 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-10T13:52:34,826 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36081 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-10T13:52:34,827 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
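The "Client=jenkins//172.17.0.3 create 'TestHBaseWalOnEC', ..." entry above is the master receiving a createTable request for a table with REGION_REPLICATION => '1' and a single 'cf' family with VERSIONS => '1'. The test itself goes through HBaseTestingUtil helpers, but an equivalent request built directly with the public Admin API would look roughly like this sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableDescriptor desc = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
                    .setRegionReplication(1) // REGION_REPLICATION => '1' in the log
                    .setColumnFamily(ColumnFamilyDescriptorBuilder
                            .newBuilder(Bytes.toBytes("cf"))
                            .setMaxVersions(1) // VERSIONS => '1'
                            .build())
                    .build();
            admin.createTable(desc); // arrives at the master as a CreateTableProcedure (pid=4 above)
            System.out.println("Created " + desc.getTableName());
        }
    }
}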
2024-11-10T13:52:34,830 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-10T13:52:34,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36081 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T13:52:34,838 WARN [PEWorker-4 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:34,838 WARN [PEWorker-4 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:34,841 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-180646471_22 at /127.0.0.1:48452 [Receiving block BP-1365254765-172.17.0.3-1731246746770:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:35197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48452 dst: /127.0.0.1:35197 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:52:34,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_-9223372036854775680_1021 (size=392) 2024-11-10T13:52:34,852 WARN [PEWorker-4 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
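The "Cannot allocate parity block" warnings above come from writing under the RS-3-2-1024k erasure coding policy, which needs 3 data plus 2 parity datanodes while this mini cluster runs only 3 datanodes, so parity blocks 3 and 4 cannot be placed; the log itself points at 'hdfs ec -verifyClusterSetup' for checking the topology. A small sketch of inspecting and applying the policy programmatically, assuming an HDFS 3.x DistributedFileSystem and that the policy has been enabled on the cluster; the directory path is illustrative and the namenode address is taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class ErasureCodingPolicySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:44185"); // namenode address from the log
        try (FileSystem fs = FileSystem.get(conf)) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            Path dir = new Path("/user/jenkins/test-data"); // illustrative directory

            // Apply RS-3-2-1024k to the directory; with only 3 datanodes the two parity
            // blocks cannot all be placed, which is what the warnings above report.
            dfs.setErasureCodingPolicy(dir, "RS-3-2-1024k");

            ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
            System.out.println("Policy on " + dir + ": " + policy.getName()
                    + " (data=" + policy.getNumDataUnits()
                    + ", parity=" + policy.getNumParityUnits() + ")");
        }
    }
}

The corresponding command-line checks are 'hdfs ec -listPolicies' and the 'hdfs ec -verifyClusterSetup' call mentioned in the warning itself.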
2024-11-10T13:52:34,855 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3bbdebb8a1241fe1859bd6fe2a214353, NAME => 'TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7 2024-11-10T13:52:34,860 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:34,861 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:34,863 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-180646471_22 at /127.0.0.1:48472 [Receiving block BP-1365254765-172.17.0.3-1731246746770:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:35197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48472 dst: /127.0.0.1:35197 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:52:34,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_-9223372036854775664_1023 (size=51) 2024-11-10T13:52:34,870 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-10T13:52:34,871 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:52:34,871 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 3bbdebb8a1241fe1859bd6fe2a214353, disabling compactions & flushes 2024-11-10T13:52:34,871 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353. 2024-11-10T13:52:34,871 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353. 2024-11-10T13:52:34,871 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353. after waiting 0 ms 2024-11-10T13:52:34,871 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353. 2024-11-10T13:52:34,871 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353. 2024-11-10T13:52:34,871 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3bbdebb8a1241fe1859bd6fe2a214353: Waiting for close lock at 1731246754871Disabling compacts and flushes for region at 1731246754871Disabling writes for close at 1731246754871Writing region close event to WAL at 1731246754871Closed at 1731246754871 2024-11-10T13:52:34,873 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-10T13:52:34,878 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731246754873"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731246754873"}]},"ts":"1731246754873"} 2024-11-10T13:52:34,883 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
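For orientation, the table being created above ('TestHBaseWalOnEC' with a single family 'cf' and VERSIONS => '1') corresponds to the kind of client-side Admin call sketched below. This is an illustrative reconstruction from the descriptor printed in the log, not the test's own code; the connection setup assumes an HBase client configuration on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // One column family 'cf' keeping a single version, matching the
      // descriptor logged by CreateTableProcedure above.
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build())
          .build();
      // Triggers a CreateTableProcedure like pid=4, whose states are logged above.
      admin.createTable(desc);
    }
  }
}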
2024-11-10T13:52:34,884 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-10T13:52:34,887 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731246754885"}]},"ts":"1731246754885"} 2024-11-10T13:52:34,891 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-10T13:52:34,892 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {2467a7071e00=0} racks are {/default-rack=0} 2024-11-10T13:52:34,893 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-10T13:52:34,893 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-10T13:52:34,893 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-10T13:52:34,893 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-10T13:52:34,893 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-10T13:52:34,893 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-10T13:52:34,893 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-10T13:52:34,893 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-10T13:52:34,893 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-10T13:52:34,893 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-10T13:52:34,895 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=3bbdebb8a1241fe1859bd6fe2a214353, ASSIGN}] 2024-11-10T13:52:34,897 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=3bbdebb8a1241fe1859bd6fe2a214353, ASSIGN 2024-11-10T13:52:34,899 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=3bbdebb8a1241fe1859bd6fe2a214353, ASSIGN; state=OFFLINE, location=2467a7071e00,33405,1731246752389; forceNewPlan=false, retain=false 2024-11-10T13:52:34,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36081 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T13:52:35,054 INFO [2467a7071e00:36081 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-10T13:52:35,056 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3bbdebb8a1241fe1859bd6fe2a214353, regionState=OPENING, regionLocation=2467a7071e00,33405,1731246752389 2024-11-10T13:52:35,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=3bbdebb8a1241fe1859bd6fe2a214353, ASSIGN because future has completed 2024-11-10T13:52:35,064 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3bbdebb8a1241fe1859bd6fe2a214353, server=2467a7071e00,33405,1731246752389}] 2024-11-10T13:52:35,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36081 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T13:52:35,219 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T13:52:35,223 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43511, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T13:52:35,230 INFO [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353. 2024-11-10T13:52:35,231 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 3bbdebb8a1241fe1859bd6fe2a214353, NAME => 'TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353.', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:52:35,231 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 3bbdebb8a1241fe1859bd6fe2a214353 2024-11-10T13:52:35,232 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:52:35,232 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 3bbdebb8a1241fe1859bd6fe2a214353 2024-11-10T13:52:35,232 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 3bbdebb8a1241fe1859bd6fe2a214353 2024-11-10T13:52:35,235 INFO [StoreOpener-3bbdebb8a1241fe1859bd6fe2a214353-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3bbdebb8a1241fe1859bd6fe2a214353 2024-11-10T13:52:35,238 INFO [StoreOpener-3bbdebb8a1241fe1859bd6fe2a214353-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3bbdebb8a1241fe1859bd6fe2a214353 columnFamilyName cf 2024-11-10T13:52:35,238 DEBUG [StoreOpener-3bbdebb8a1241fe1859bd6fe2a214353-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:35,239 INFO [StoreOpener-3bbdebb8a1241fe1859bd6fe2a214353-1 {}] regionserver.HStore(327): Store=3bbdebb8a1241fe1859bd6fe2a214353/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:52:35,239 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 3bbdebb8a1241fe1859bd6fe2a214353 2024-11-10T13:52:35,241 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/default/TestHBaseWalOnEC/3bbdebb8a1241fe1859bd6fe2a214353 2024-11-10T13:52:35,242 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/default/TestHBaseWalOnEC/3bbdebb8a1241fe1859bd6fe2a214353 2024-11-10T13:52:35,242 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 3bbdebb8a1241fe1859bd6fe2a214353 2024-11-10T13:52:35,242 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 3bbdebb8a1241fe1859bd6fe2a214353 2024-11-10T13:52:35,246 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 3bbdebb8a1241fe1859bd6fe2a214353 2024-11-10T13:52:35,251 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/default/TestHBaseWalOnEC/3bbdebb8a1241fe1859bd6fe2a214353/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:52:35,252 INFO [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 3bbdebb8a1241fe1859bd6fe2a214353; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66341429, jitterRate=-0.011435672640800476}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T13:52:35,252 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3bbdebb8a1241fe1859bd6fe2a214353 2024-11-10T13:52:35,253 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 3bbdebb8a1241fe1859bd6fe2a214353: Running coprocessor pre-open hook at 1731246755232Writing region info on filesystem at 1731246755232Initializing all the Stores at 1731246755235 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731246755235Cleaning up temporary data from old regions at 1731246755243 (+8 ms)Running coprocessor post-open hooks at 1731246755252 (+9 ms)Region opened successfully at 1731246755253 (+1 ms) 2024-11-10T13:52:35,255 INFO [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353., pid=6, masterSystemTime=1731246755219 2024-11-10T13:52:35,258 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353. 2024-11-10T13:52:35,258 INFO [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353. 2024-11-10T13:52:35,259 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3bbdebb8a1241fe1859bd6fe2a214353, regionState=OPEN, openSeqNum=2, regionLocation=2467a7071e00,33405,1731246752389 2024-11-10T13:52:35,263 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3bbdebb8a1241fe1859bd6fe2a214353, server=2467a7071e00,33405,1731246752389 because future has completed 2024-11-10T13:52:35,269 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-10T13:52:35,271 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 3bbdebb8a1241fe1859bd6fe2a214353, server=2467a7071e00,33405,1731246752389 in 201 msec 2024-11-10T13:52:35,273 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-10T13:52:35,273 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=3bbdebb8a1241fe1859bd6fe2a214353, ASSIGN in 374 msec 2024-11-10T13:52:35,275 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-10T13:52:35,275 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731246755275"}]},"ts":"1731246755275"} 2024-11-10T13:52:35,278 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-10T13:52:35,280 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-10T13:52:35,283 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 464 msec 2024-11-10T13:52:35,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36081 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T13:52:35,461 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-10T13:52:35,461 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-10T13:52:35,463 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-10T13:52:35,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-10T13:52:35,473 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-10T13:52:35,474 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-11-10T13:52:35,487 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353., hostname=2467a7071e00,33405,1731246752389, seqNum=2] 2024-11-10T13:52:35,488 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T13:52:35,490 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43788, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T13:52:35,497 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36081 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestHBaseWalOnEC 2024-11-10T13:52:35,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36081 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-10T13:52:35,504 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-10T13:52:35,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36081 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T13:52:35,506 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-10T13:52:35,507 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-10T13:52:35,610 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36081 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T13:52:35,677 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33405 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-10T13:52:35,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353. 2024-11-10T13:52:35,681 INFO [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 3bbdebb8a1241fe1859bd6fe2a214353 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-10T13:52:35,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/default/TestHBaseWalOnEC/3bbdebb8a1241fe1859bd6fe2a214353/.tmp/cf/5276379db84246a8a7af004a08e0811f is 36, key is row/cf:cq/1731246755491/Put/seqid=0 2024-11-10T13:52:35,733 WARN [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:35,733 WARN [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:35,737 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_269217626_22 at /127.0.0.1:37908 [Receiving block BP-1365254765-172.17.0.3-1731246746770:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:44149:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37908 dst: /127.0.0.1:44149 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:52:35,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44149 is added to blk_-9223372036854775648_1025 (size=4787) 2024-11-10T13:52:35,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36081 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T13:52:36,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36081 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T13:52:36,145 WARN [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T13:52:36,145 INFO [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/default/TestHBaseWalOnEC/3bbdebb8a1241fe1859bd6fe2a214353/.tmp/cf/5276379db84246a8a7af004a08e0811f 2024-11-10T13:52:36,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/default/TestHBaseWalOnEC/3bbdebb8a1241fe1859bd6fe2a214353/.tmp/cf/5276379db84246a8a7af004a08e0811f as hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/default/TestHBaseWalOnEC/3bbdebb8a1241fe1859bd6fe2a214353/cf/5276379db84246a8a7af004a08e0811f 2024-11-10T13:52:36,198 INFO [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/default/TestHBaseWalOnEC/3bbdebb8a1241fe1859bd6fe2a214353/cf/5276379db84246a8a7af004a08e0811f, entries=1, sequenceid=5, filesize=4.7 K 2024-11-10T13:52:36,206 INFO [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 3bbdebb8a1241fe1859bd6fe2a214353 in 523ms, sequenceid=5, compaction requested=false 2024-11-10T13:52:36,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-10T13:52:36,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 3bbdebb8a1241fe1859bd6fe2a214353: 2024-11-10T13:52:36,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353. 
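The flush just logged (FlushTableProcedure pid=7, one HFile of ~4.7 K added for family 'cf') was requested by the client ('Client=jenkins//172.17.0.3 flush TestHBaseWalOnEC'). A minimal sketch of that kind of request through the public Admin API follows; it assumes a client configuration pointing at this cluster and is not the test code itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; on the server side
      // this shows up as FlushTableProcedure -> FlushRegionProcedure, as in the log.
      admin.flush(TableName.valueOf("TestHBaseWalOnEC"));
    }
  }
}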
2024-11-10T13:52:36,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-10T13:52:36,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36081 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-10T13:52:36,217 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-10T13:52:36,217 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 706 msec 2024-11-10T13:52:36,222 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 720 msec 2024-11-10T13:52:36,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-10T13:52:36,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44149 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-10T13:52:36,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_-9223372036854775725_1010 (size=34) 2024-11-10T13:52:36,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44149 is added to blk_-9223372036854775724_1010 (size=34) 2024-11-10T13:52:36,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_-9223372036854775709_1013 (size=1321) 2024-11-10T13:52:36,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_-9223372036854775708_1013 (size=1321) 2024-11-10T13:52:36,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44149 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-10T13:52:36,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-10T13:52:36,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_-9223372036854775740_1008 (size=1189) 2024-11-10T13:52:36,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44149 is added to blk_-9223372036854775741_1008 (size=1189) 2024-11-10T13:52:36,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36081 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T13:52:36,640 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-10T13:52:36,651 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T13:52:36,651 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-10T13:52:36,652 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:52:36,656 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-11-10T13:52:36,657 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:52:36,657 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-10T13:52:36,657 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-10T13:52:36,657 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=245640440, stopped=false 2024-11-10T13:52:36,658 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2467a7071e00,36081,1731246751482 2024-11-10T13:52:36,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39575-0x1012504f7810001, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:52:36,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42563-0x1012504f7810002, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:52:36,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:52:36,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x1012504f7810003, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:52:36,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39575-0x1012504f7810001, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:36,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x1012504f7810003, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:36,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:36,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42563-0x1012504f7810002, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:36,725 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T13:52:36,726 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-10T13:52:36,727 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39575-0x1012504f7810001, quorum=127.0.0.1:62793, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:52:36,727 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:52:36,727 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:52:36,727 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42563-0x1012504f7810002, quorum=127.0.0.1:62793, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:52:36,727 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33405-0x1012504f7810003, quorum=127.0.0.1:62793, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:52:36,727 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:52:36,728 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2467a7071e00,39575,1731246752244' ***** 2024-11-10T13:52:36,728 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T13:52:36,728 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2467a7071e00,42563,1731246752340' ***** 2024-11-10T13:52:36,728 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T13:52:36,728 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2467a7071e00,33405,1731246752389' ***** 2024-11-10T13:52:36,729 INFO [RS:1;2467a7071e00:42563 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T13:52:36,729 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T13:52:36,729 INFO [RS:0;2467a7071e00:39575 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T13:52:36,729 INFO [RS:1;2467a7071e00:42563 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T13:52:36,729 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T13:52:36,729 INFO [RS:0;2467a7071e00:39575 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T13:52:36,729 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T13:52:36,729 INFO [RS:1;2467a7071e00:42563 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T13:52:36,730 INFO [RS:0;2467a7071e00:39575 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-10T13:52:36,730 INFO [RS:1;2467a7071e00:42563 {}] regionserver.HRegionServer(959): stopping server 2467a7071e00,42563,1731246752340 2024-11-10T13:52:36,730 INFO [RS:0;2467a7071e00:39575 {}] regionserver.HRegionServer(959): stopping server 2467a7071e00,39575,1731246752244 2024-11-10T13:52:36,730 INFO [RS:2;2467a7071e00:33405 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T13:52:36,730 INFO [RS:0;2467a7071e00:39575 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T13:52:36,730 INFO [RS:1;2467a7071e00:42563 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T13:52:36,730 INFO [RS:2;2467a7071e00:33405 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T13:52:36,730 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T13:52:36,730 INFO [RS:0;2467a7071e00:39575 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2467a7071e00:39575. 2024-11-10T13:52:36,730 INFO [RS:1;2467a7071e00:42563 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;2467a7071e00:42563. 2024-11-10T13:52:36,730 INFO [RS:2;2467a7071e00:33405 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T13:52:36,730 DEBUG [RS:0;2467a7071e00:39575 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:52:36,730 DEBUG [RS:1;2467a7071e00:42563 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at 
org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:52:36,730 DEBUG [RS:0;2467a7071e00:39575 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:52:36,730 INFO [RS:2;2467a7071e00:33405 {}] regionserver.HRegionServer(3091): Received CLOSE for 3bbdebb8a1241fe1859bd6fe2a214353 2024-11-10T13:52:36,730 DEBUG [RS:1;2467a7071e00:42563 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:52:36,731 INFO [RS:0;2467a7071e00:39575 {}] regionserver.HRegionServer(976): stopping server 2467a7071e00,39575,1731246752244; all regions closed. 2024-11-10T13:52:36,731 INFO [RS:1;2467a7071e00:42563 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T13:52:36,731 INFO [RS:1;2467a7071e00:42563 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T13:52:36,731 INFO [RS:1;2467a7071e00:42563 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T13:52:36,731 INFO [RS:2;2467a7071e00:33405 {}] regionserver.HRegionServer(959): stopping server 2467a7071e00,33405,1731246752389 2024-11-10T13:52:36,731 INFO [RS:1;2467a7071e00:42563 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-10T13:52:36,731 INFO [RS:2;2467a7071e00:33405 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T13:52:36,731 INFO [RS:2;2467a7071e00:33405 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;2467a7071e00:33405. 
2024-11-10T13:52:36,731 DEBUG [RS:2;2467a7071e00:33405 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:52:36,732 DEBUG [RS:2;2467a7071e00:33405 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:52:36,732 INFO [RS:1;2467a7071e00:42563 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-10T13:52:36,732 DEBUG [RS:1;2467a7071e00:42563 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-10T13:52:36,732 DEBUG [RS_CLOSE_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3bbdebb8a1241fe1859bd6fe2a214353, disabling compactions & flushes 2024-11-10T13:52:36,732 INFO [RS_CLOSE_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353. 2024-11-10T13:52:36,732 INFO [RS:2;2467a7071e00:33405 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-10T13:52:36,732 DEBUG [RS:2;2467a7071e00:33405 {}] regionserver.HRegionServer(1325): Online Regions={3bbdebb8a1241fe1859bd6fe2a214353=TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353.} 2024-11-10T13:52:36,732 DEBUG [RS_CLOSE_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353. 2024-11-10T13:52:36,732 DEBUG [RS_CLOSE_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353. after waiting 0 ms 2024-11-10T13:52:36,732 DEBUG [RS:1;2467a7071e00:42563 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-10T13:52:36,732 DEBUG [RS_CLOSE_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353. 
2024-11-10T13:52:36,732 DEBUG [RS:2;2467a7071e00:33405 {}] regionserver.HRegionServer(1351): Waiting on 3bbdebb8a1241fe1859bd6fe2a214353 2024-11-10T13:52:36,733 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T13:52:36,733 INFO [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T13:52:36,733 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T13:52:36,733 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T13:52:36,733 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T13:52:36,734 INFO [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-10T13:52:36,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44149 is added to blk_1073741828_1018 (size=93) 2024-11-10T13:52:36,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741828_1018 (size=93) 2024-11-10T13:52:36,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741828_1018 (size=93) 2024-11-10T13:52:36,746 DEBUG [RS:0;2467a7071e00:39575 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/oldWALs 2024-11-10T13:52:36,746 INFO [RS:0;2467a7071e00:39575 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2467a7071e00%2C39575%2C1731246752244:(num 1731246753965) 2024-11-10T13:52:36,746 DEBUG [RS:0;2467a7071e00:39575 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:52:36,746 INFO [RS:0;2467a7071e00:39575 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:52:36,746 INFO [RS:0;2467a7071e00:39575 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:52:36,746 INFO [RS:0;2467a7071e00:39575 {}] hbase.ChoreService(370): Chore service for: regionserver/2467a7071e00:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T13:52:36,747 INFO [RS:0;2467a7071e00:39575 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T13:52:36,747 INFO [RS:0;2467a7071e00:39575 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T13:52:36,747 INFO [regionserver/2467a7071e00:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T13:52:36,747 INFO [RS:0;2467a7071e00:39575 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-10T13:52:36,747 INFO [RS:0;2467a7071e00:39575 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:52:36,747 INFO [RS:0;2467a7071e00:39575 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:39575 2024-11-10T13:52:36,754 DEBUG [RS_CLOSE_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/default/TestHBaseWalOnEC/3bbdebb8a1241fe1859bd6fe2a214353/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-10T13:52:36,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39575-0x1012504f7810001, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2467a7071e00,39575,1731246752244 2024-11-10T13:52:36,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:52:36,755 INFO [RS:0;2467a7071e00:39575 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:52:36,756 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2467a7071e00,39575,1731246752244] 2024-11-10T13:52:36,756 INFO [RS_CLOSE_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353. 2024-11-10T13:52:36,757 DEBUG [RS_CLOSE_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3bbdebb8a1241fe1859bd6fe2a214353: Waiting for close lock at 1731246756731Running coprocessor pre-close hooks at 1731246756732 (+1 ms)Disabling compacts and flushes for region at 1731246756732Disabling writes for close at 1731246756732Writing region close event to WAL at 1731246756734 (+2 ms)Running coprocessor post-close hooks at 1731246756755 (+21 ms)Closed at 1731246756756 (+1 ms) 2024-11-10T13:52:36,757 DEBUG [RS_CLOSE_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353. 2024-11-10T13:52:36,764 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740/.tmp/info/0e1b6b180837467e97911aa2bf858eda is 153, key is TestHBaseWalOnEC,,1731246754810.3bbdebb8a1241fe1859bd6fe2a214353./info:regioninfo/1731246755259/Put/seqid=0 2024-11-10T13:52:36,767 WARN [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:36,767 WARN [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:36,772 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1793836329_22 at /127.0.0.1:37970 [Receiving block BP-1365254765-172.17.0.3-1731246746770:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:44149:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37970 dst: /127.0.0.1:44149 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:52:36,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44149 is added to blk_-9223372036854775632_1027 (size=6637) 2024-11-10T13:52:36,776 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2467a7071e00,39575,1731246752244 already deleted, retry=false 2024-11-10T13:52:36,777 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2467a7071e00,39575,1731246752244 expired; onlineServers=2 2024-11-10T13:52:36,777 WARN [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
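The repeated DFSStripedOutputStream warnings above are the expected symptom of writing with the RS-3-2-1024k erasure coding policy on a mini cluster that has only three DataNodes: the policy needs 3 data plus 2 parity blocks on distinct nodes, so parity indices 3 and 4 can never be placed, and the log itself points at 'hdfs ec -verifyClusterSetup' as the diagnostic. As a rough illustration (not part of the test), the same check can be done with the Hadoop 3.x HDFS client API; the NameNode URI and path below are placeholders modeled on the log.

```java
// Illustrative sketch only: compare an EC policy's block requirements with
// the number of live DataNodes. URI and path are placeholders.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class EcCapacityCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (DistributedFileSystem dfs = (DistributedFileSystem)
        FileSystem.get(URI.create("hdfs://localhost:44185"), conf)) {
      ErasureCodingPolicy policy =
          dfs.getErasureCodingPolicy(new Path("/user/jenkins/test-data"));
      if (policy == null) {
        System.out.println("Path is not erasure coded (replicated layout)");
        return;
      }
      // e.g. 3 data + 2 parity for RS-3-2-1024k
      int required = policy.getNumDataUnits() + policy.getNumParityUnits();
      int live = dfs.getDataNodeStats(DatanodeReportType.LIVE).length;
      System.out.printf("policy=%s needs %d DataNodes, live=%d%n",
          policy.getName(), required, live);
    }
  }
}
```

With three live DataNodes against a requirement of five, such a check would flag the same shortfall that surfaces here as the "failed to write 2 blocks" warnings.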
2024-11-10T13:52:36,777 INFO [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740/.tmp/info/0e1b6b180837467e97911aa2bf858eda 2024-11-10T13:52:36,796 INFO [regionserver/2467a7071e00:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-10T13:52:36,796 INFO [regionserver/2467a7071e00:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-10T13:52:36,803 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740/.tmp/ns/5e48cc9902624f11a105e2542a870624 is 43, key is default/ns:d/1731246754594/Put/seqid=0 2024-11-10T13:52:36,804 INFO [regionserver/2467a7071e00:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:52:36,804 INFO [regionserver/2467a7071e00:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:52:36,806 WARN [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:36,806 WARN [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:36,806 INFO [regionserver/2467a7071e00:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:52:36,809 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1793836329_22 at /127.0.0.1:51480 [Receiving block BP-1365254765-172.17.0.3-1731246746770:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:40371:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51480 dst: /127.0.0.1:40371 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:52:36,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_-9223372036854775616_1029 (size=5153) 2024-11-10T13:52:36,813 WARN [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T13:52:36,813 INFO [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740/.tmp/ns/5e48cc9902624f11a105e2542a870624 2024-11-10T13:52:36,832 INFO [regionserver/2467a7071e00:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-10T13:52:36,832 INFO [regionserver/2467a7071e00:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-10T13:52:36,841 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740/.tmp/table/2a9976c785d94c3b880ca42dcbe5c70f is 52, key is TestHBaseWalOnEC/table:state/1731246755275/Put/seqid=0 2024-11-10T13:52:36,843 WARN [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:36,843 WARN [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:36,846 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1793836329_22 at /127.0.0.1:48538 [Receiving block BP-1365254765-172.17.0.3-1731246746770:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:35197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48538 dst: /127.0.0.1:35197 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:52:36,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_-9223372036854775600_1031 (size=5249) 2024-11-10T13:52:36,851 WARN [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T13:52:36,851 INFO [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740/.tmp/table/2a9976c785d94c3b880ca42dcbe5c70f 2024-11-10T13:52:36,863 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740/.tmp/info/0e1b6b180837467e97911aa2bf858eda as hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740/info/0e1b6b180837467e97911aa2bf858eda 2024-11-10T13:52:36,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39575-0x1012504f7810001, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:52:36,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39575-0x1012504f7810001, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:52:36,867 INFO [RS:0;2467a7071e00:39575 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:52:36,867 INFO [RS:0;2467a7071e00:39575 {}] regionserver.HRegionServer(1031): Exiting; stopping=2467a7071e00,39575,1731246752244; zookeeper connection closed. 
2024-11-10T13:52:36,867 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@158be632 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@158be632 2024-11-10T13:52:36,873 INFO [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740/info/0e1b6b180837467e97911aa2bf858eda, entries=10, sequenceid=11, filesize=6.5 K 2024-11-10T13:52:36,874 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740/.tmp/ns/5e48cc9902624f11a105e2542a870624 as hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740/ns/5e48cc9902624f11a105e2542a870624 2024-11-10T13:52:36,884 INFO [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740/ns/5e48cc9902624f11a105e2542a870624, entries=2, sequenceid=11, filesize=5.0 K 2024-11-10T13:52:36,886 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740/.tmp/table/2a9976c785d94c3b880ca42dcbe5c70f as hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740/table/2a9976c785d94c3b880ca42dcbe5c70f 2024-11-10T13:52:36,896 INFO [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740/table/2a9976c785d94c3b880ca42dcbe5c70f, entries=2, sequenceid=11, filesize=5.1 K 2024-11-10T13:52:36,897 INFO [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 164ms, sequenceid=11, compaction requested=false 2024-11-10T13:52:36,897 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-10T13:52:36,906 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-10T13:52:36,908 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T13:52:36,908 INFO [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T13:52:36,908 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 
1731246756733Running coprocessor pre-close hooks at 1731246756733Disabling compacts and flushes for region at 1731246756733Disabling writes for close at 1731246756733Obtaining lock to block concurrent updates at 1731246756734 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731246756734Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731246756735 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731246756736 (+1 ms)Flushing 1588230740/info: creating writer at 1731246756736Flushing 1588230740/info: appending metadata at 1731246756759 (+23 ms)Flushing 1588230740/info: closing flushed file at 1731246756759Flushing 1588230740/ns: creating writer at 1731246756788 (+29 ms)Flushing 1588230740/ns: appending metadata at 1731246756802 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731246756802Flushing 1588230740/table: creating writer at 1731246756822 (+20 ms)Flushing 1588230740/table: appending metadata at 1731246756840 (+18 ms)Flushing 1588230740/table: closing flushed file at 1731246756840Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@54985a4d: reopening flushed file at 1731246756861 (+21 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b5f6ac5: reopening flushed file at 1731246756873 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40422ea8: reopening flushed file at 1731246756884 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 164ms, sequenceid=11, compaction requested=false at 1731246756897 (+13 ms)Writing region close event to WAL at 1731246756899 (+2 ms)Running coprocessor post-close hooks at 1731246756907 (+8 ms)Closed at 1731246756908 (+1 ms) 2024-11-10T13:52:36,908 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T13:52:36,933 INFO [RS:1;2467a7071e00:42563 {}] regionserver.HRegionServer(976): stopping server 2467a7071e00,42563,1731246752340; all regions closed. 2024-11-10T13:52:36,933 INFO [RS:2;2467a7071e00:33405 {}] regionserver.HRegionServer(976): stopping server 2467a7071e00,33405,1731246752389; all regions closed. 
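The "Committing .../.tmp/info/... as .../info/..." DEBUG lines, each followed by an "Added ..., entries=..., sequenceid=11" INFO line, show the flush pattern for hbase:meta: every store file is first written under the region's .tmp directory and only then moved into the column family directory, so readers never see a half-written HFile. A minimal, hypothetical sketch of that write-then-rename idea using the plain FileSystem API is below; this is not HBase's HRegionFileSystem code, and the paths are made up.

```java
// Hypothetical sketch of the write-to-.tmp-then-commit pattern visible in the
// "Committing ... .tmp/info/... as .../info/..." log lines above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommit {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path tmp = new Path("/data/region/.tmp/info/flush-0001");
    Path dst = new Path("/data/region/info/flush-0001");

    // 1. Write the new file under .tmp so readers never observe a partial file.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeBytes("flushed cells would go here\n");
    }

    // 2. Publish it by renaming into the store directory (rename is atomic on HDFS).
    fs.mkdirs(dst.getParent());
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("Failed to commit " + tmp + " to " + dst);
    }
  }
}
```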
2024-11-10T13:52:36,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44149 is added to blk_1073741826_1016 (size=1298) 2024-11-10T13:52:36,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741826_1016 (size=1298) 2024-11-10T13:52:36,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741829_1019 (size=2751) 2024-11-10T13:52:36,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741829_1019 (size=2751) 2024-11-10T13:52:36,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741826_1016 (size=1298) 2024-11-10T13:52:36,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44149 is added to blk_1073741829_1019 (size=2751) 2024-11-10T13:52:36,943 DEBUG [RS:2;2467a7071e00:33405 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/oldWALs 2024-11-10T13:52:36,943 INFO [RS:2;2467a7071e00:33405 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2467a7071e00%2C33405%2C1731246752389:(num 1731246753965) 2024-11-10T13:52:36,943 DEBUG [RS:2;2467a7071e00:33405 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:52:36,943 INFO [RS:2;2467a7071e00:33405 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:52:36,943 DEBUG [RS:1;2467a7071e00:42563 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/oldWALs 2024-11-10T13:52:36,943 INFO [RS:1;2467a7071e00:42563 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2467a7071e00%2C42563%2C1731246752340.meta:.meta(num 1731246754369) 2024-11-10T13:52:36,943 INFO [RS:2;2467a7071e00:33405 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:52:36,943 INFO [RS:2;2467a7071e00:33405 {}] hbase.ChoreService(370): Chore service for: regionserver/2467a7071e00:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T13:52:36,944 INFO [RS:2;2467a7071e00:33405 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T13:52:36,944 INFO [regionserver/2467a7071e00:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T13:52:36,944 INFO [RS:2;2467a7071e00:33405 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T13:52:36,944 INFO [RS:2;2467a7071e00:33405 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-10T13:52:36,944 INFO [RS:2;2467a7071e00:33405 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:52:36,944 INFO [RS:2;2467a7071e00:33405 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33405 2024-11-10T13:52:36,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44149 is added to blk_1073741827_1017 (size=93) 2024-11-10T13:52:36,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741827_1017 (size=93) 2024-11-10T13:52:36,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741827_1017 (size=93) 2024-11-10T13:52:36,950 DEBUG [RS:1;2467a7071e00:42563 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/oldWALs 2024-11-10T13:52:36,950 INFO [RS:1;2467a7071e00:42563 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2467a7071e00%2C42563%2C1731246752340:(num 1731246753965) 2024-11-10T13:52:36,950 DEBUG [RS:1;2467a7071e00:42563 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:52:36,950 INFO [RS:1;2467a7071e00:42563 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:52:36,950 INFO [RS:1;2467a7071e00:42563 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:52:36,950 INFO [RS:1;2467a7071e00:42563 {}] hbase.ChoreService(370): Chore service for: regionserver/2467a7071e00:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T13:52:36,951 INFO [RS:1;2467a7071e00:42563 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:52:36,951 INFO [regionserver/2467a7071e00:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-10T13:52:36,951 INFO [RS:1;2467a7071e00:42563 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42563 2024-11-10T13:52:36,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:52:36,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x1012504f7810003, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2467a7071e00,33405,1731246752389 2024-11-10T13:52:36,956 INFO [RS:2;2467a7071e00:33405 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:52:36,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42563-0x1012504f7810002, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2467a7071e00,42563,1731246752340 2024-11-10T13:52:36,966 INFO [RS:1;2467a7071e00:42563 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:52:36,966 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2467a7071e00,33405,1731246752389] 2024-11-10T13:52:36,987 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2467a7071e00,33405,1731246752389 already deleted, retry=false 2024-11-10T13:52:36,987 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2467a7071e00,33405,1731246752389 expired; onlineServers=1 2024-11-10T13:52:36,987 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2467a7071e00,42563,1731246752340] 2024-11-10T13:52:36,998 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2467a7071e00,42563,1731246752340 already deleted, retry=false 2024-11-10T13:52:36,998 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2467a7071e00,42563,1731246752340 expired; onlineServers=0 2024-11-10T13:52:36,998 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2467a7071e00,36081,1731246751482' ***** 2024-11-10T13:52:36,998 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-10T13:52:36,999 INFO [M:0;2467a7071e00:36081 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T13:52:36,999 INFO [M:0;2467a7071e00:36081 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:52:36,999 DEBUG [M:0;2467a7071e00:36081 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-10T13:52:36,999 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-10T13:52:36,999 DEBUG [M:0;2467a7071e00:36081 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-10T13:52:36,999 DEBUG [master/2467a7071e00:0:becomeActiveMaster-HFileCleaner.large.0-1731246753575 {}] cleaner.HFileCleaner(306): Exit Thread[master/2467a7071e00:0:becomeActiveMaster-HFileCleaner.large.0-1731246753575,5,FailOnTimeoutGroup] 2024-11-10T13:52:36,999 DEBUG [master/2467a7071e00:0:becomeActiveMaster-HFileCleaner.small.0-1731246753576 {}] cleaner.HFileCleaner(306): Exit Thread[master/2467a7071e00:0:becomeActiveMaster-HFileCleaner.small.0-1731246753576,5,FailOnTimeoutGroup] 2024-11-10T13:52:37,000 INFO [M:0;2467a7071e00:36081 {}] hbase.ChoreService(370): Chore service for: master/2467a7071e00:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-10T13:52:37,000 INFO [M:0;2467a7071e00:36081 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:52:37,000 DEBUG [M:0;2467a7071e00:36081 {}] master.HMaster(1795): Stopping service threads 2024-11-10T13:52:37,000 INFO [M:0;2467a7071e00:36081 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-10T13:52:37,000 INFO [M:0;2467a7071e00:36081 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T13:52:37,001 INFO [M:0;2467a7071e00:36081 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-10T13:52:37,001 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-10T13:52:37,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-10T13:52:37,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:37,008 DEBUG [M:0;2467a7071e00:36081 {}] zookeeper.ZKUtil(347): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-10T13:52:37,008 WARN [M:0;2467a7071e00:36081 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-10T13:52:37,009 INFO [M:0;2467a7071e00:36081 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/.lastflushedseqids 2024-11-10T13:52:37,021 WARN [M:0;2467a7071e00:36081 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:37,021 WARN [M:0;2467a7071e00:36081 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-10T13:52:37,024 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-180646471_22 at /127.0.0.1:48564 [Receiving block BP-1365254765-172.17.0.3-1731246746770:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:35197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48564 dst: /127.0.0.1:35197 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:52:37,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_-9223372036854775584_1033 (size=127) 2024-11-10T13:52:37,030 WARN [M:0;2467a7071e00:36081 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T13:52:37,030 INFO [M:0;2467a7071e00:36081 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-10T13:52:37,030 INFO [M:0;2467a7071e00:36081 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-10T13:52:37,030 DEBUG [M:0;2467a7071e00:36081 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T13:52:37,030 INFO [M:0;2467a7071e00:36081 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:52:37,030 DEBUG [M:0;2467a7071e00:36081 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:52:37,030 DEBUG [M:0;2467a7071e00:36081 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T13:52:37,030 DEBUG [M:0;2467a7071e00:36081 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-10T13:52:37,031 INFO [M:0;2467a7071e00:36081 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-11-10T13:52:37,048 DEBUG [M:0;2467a7071e00:36081 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b59551dec3604badab650a1365d50024 is 82, key is hbase:meta,,1/info:regioninfo/1731246754452/Put/seqid=0 2024-11-10T13:52:37,050 WARN [M:0;2467a7071e00:36081 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:37,050 WARN [M:0;2467a7071e00:36081 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:37,053 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-180646471_22 at /127.0.0.1:51510 [Receiving block BP-1365254765-172.17.0.3-1731246746770:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:40371:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51510 dst: /127.0.0.1:40371 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:52:37,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_-9223372036854775568_1035 (size=5672) 2024-11-10T13:52:37,058 WARN [M:0;2467a7071e00:36081 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
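All of these flushes go through hdfs.DFSStripedOutputStream because the test data directory carries the RS-3-2-1024k policy, which is what TestHBaseWalOnEC exercises. Below is a hedged sketch of how such a policy is typically enabled and applied with the HDFS client API; the URI and directory are placeholders, and this is illustrative setup rather than the test's own code.

```java
// Hedged sketch: enable RS-3-2-1024k and apply it to a directory, which makes
// files under it be written as striped (3 data + 2 parity) block groups.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class ApplyEcPolicy {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem)
        FileSystem.get(URI.create("hdfs://localhost:44185"), conf);

    // Most built-in policies ship disabled, so enable this one first.
    dfs.enableErasureCodingPolicy("RS-3-2-1024k");

    // Files created under this directory now need at least 5 DataNodes to
    // place a full block group, hence the parity-allocation warnings above
    // on a 3-DataNode mini cluster.
    Path dir = new Path("/user/jenkins/test-data");
    dfs.mkdirs(dir);
    dfs.setErasureCodingPolicy(dir, "RS-3-2-1024k");
  }
}
```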
2024-11-10T13:52:37,058 INFO [M:0;2467a7071e00:36081 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b59551dec3604badab650a1365d50024 2024-11-10T13:52:37,077 INFO [RS:2;2467a7071e00:33405 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:52:37,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x1012504f7810003, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:52:37,077 INFO [RS:2;2467a7071e00:33405 {}] regionserver.HRegionServer(1031): Exiting; stopping=2467a7071e00,33405,1731246752389; zookeeper connection closed. 2024-11-10T13:52:37,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x1012504f7810003, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:52:37,077 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@e29a150 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@e29a150 2024-11-10T13:52:37,083 DEBUG [M:0;2467a7071e00:36081 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0ef880ed35614da4ae1ac7d9981a930a is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731246755282/Put/seqid=0 2024-11-10T13:52:37,085 WARN [M:0;2467a7071e00:36081 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:37,086 WARN [M:0;2467a7071e00:36081 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:37,087 INFO [RS:1;2467a7071e00:42563 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:52:37,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42563-0x1012504f7810002, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:52:37,087 INFO [RS:1;2467a7071e00:42563 {}] regionserver.HRegionServer(1031): Exiting; stopping=2467a7071e00,42563,1731246752340; zookeeper connection closed. 
2024-11-10T13:52:37,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42563-0x1012504f7810002, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:52:37,088 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6bb7531 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6bb7531 2024-11-10T13:52:37,088 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-10T13:52:37,089 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-180646471_22 at /127.0.0.1:51520 [Receiving block BP-1365254765-172.17.0.3-1731246746770:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:40371:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51520 dst: /127.0.0.1:40371 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:52:37,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_-9223372036854775552_1037 (size=6439) 2024-11-10T13:52:37,497 WARN [M:0;2467a7071e00:36081 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-10T13:52:37,498 INFO [M:0;2467a7071e00:36081 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0ef880ed35614da4ae1ac7d9981a930a 2024-11-10T13:52:37,527 DEBUG [M:0;2467a7071e00:36081 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6adc3a6def064913a396b9dcb6b0c1ee is 69, key is 2467a7071e00,33405,1731246752389/rs:state/1731246753679/Put/seqid=0 2024-11-10T13:52:37,529 WARN [M:0;2467a7071e00:36081 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:37,529 WARN [M:0;2467a7071e00:36081 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-10T13:52:37,532 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-180646471_22 at /127.0.0.1:51550 [Receiving block BP-1365254765-172.17.0.3-1731246746770:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:40371:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51550 dst: /127.0.0.1:40371 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:52:37,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_-9223372036854775536_1039 (size=5294) 2024-11-10T13:52:37,538 WARN [M:0;2467a7071e00:36081 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-10T13:52:37,539 INFO [M:0;2467a7071e00:36081 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6adc3a6def064913a396b9dcb6b0c1ee 2024-11-10T13:52:37,547 DEBUG [M:0;2467a7071e00:36081 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b59551dec3604badab650a1365d50024 as hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b59551dec3604badab650a1365d50024 2024-11-10T13:52:37,554 INFO [M:0;2467a7071e00:36081 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b59551dec3604badab650a1365d50024, entries=8, sequenceid=72, filesize=5.5 K 2024-11-10T13:52:37,555 DEBUG [M:0;2467a7071e00:36081 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0ef880ed35614da4ae1ac7d9981a930a as hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0ef880ed35614da4ae1ac7d9981a930a 2024-11-10T13:52:37,563 INFO [M:0;2467a7071e00:36081 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0ef880ed35614da4ae1ac7d9981a930a, entries=8, sequenceid=72, filesize=6.3 K 2024-11-10T13:52:37,564 DEBUG [M:0;2467a7071e00:36081 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6adc3a6def064913a396b9dcb6b0c1ee as hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6adc3a6def064913a396b9dcb6b0c1ee 2024-11-10T13:52:37,572 INFO [M:0;2467a7071e00:36081 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6adc3a6def064913a396b9dcb6b0c1ee, entries=3, sequenceid=72, filesize=5.2 K 2024-11-10T13:52:37,573 INFO [M:0;2467a7071e00:36081 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 543ms, sequenceid=72, compaction requested=false 2024-11-10T13:52:37,574 INFO [M:0;2467a7071e00:36081 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-10T13:52:37,575 DEBUG [M:0;2467a7071e00:36081 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731246757030Disabling compacts and flushes for region at 1731246757030Disabling writes for close at 1731246757030Obtaining lock to block concurrent updates at 1731246757031 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731246757031Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1731246757031Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731246757032 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731246757032Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731246757047 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731246757047Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731246757066 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731246757082 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731246757082Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731246757513 (+431 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731246757527 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731246757527Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@369c96b8: reopening flushed file at 1731246757545 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5de72ee5: reopening flushed file at 1731246757554 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c9f083: reopening flushed file at 1731246757563 (+9 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 543ms, sequenceid=72, compaction requested=false at 1731246757573 (+10 ms)Writing region close event to WAL at 1731246757574 (+1 ms)Closed at 1731246757574 2024-11-10T13:52:37,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40371 is added to blk_1073741825_1011 (size=32674) 2024-11-10T13:52:37,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741825_1011 (size=32674) 2024-11-10T13:52:37,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44149 is added to blk_1073741825_1011 (size=32674) 2024-11-10T13:52:37,579 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T13:52:37,579 INFO [M:0;2467a7071e00:36081 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-10T13:52:37,579 INFO [M:0;2467a7071e00:36081 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:36081 2024-11-10T13:52:37,579 INFO [M:0;2467a7071e00:36081 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:52:37,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:52:37,730 INFO [M:0;2467a7071e00:36081 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:52:37,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36081-0x1012504f7810000, quorum=127.0.0.1:62793, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:52:37,773 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3114ae69{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:52:37,777 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c70a874{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:52:37,777 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:52:37,777 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5822645a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:52:37,778 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16cd567f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/hadoop.log.dir/,STOPPED} 2024-11-10T13:52:37,780 WARN [BP-1365254765-172.17.0.3-1731246746770 heartbeating to localhost/127.0.0.1:44185 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:52:37,780 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T13:52:37,780 WARN [BP-1365254765-172.17.0.3-1731246746770 heartbeating to localhost/127.0.0.1:44185 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1365254765-172.17.0.3-1731246746770 (Datanode Uuid 4ee6e221-f22e-4dea-9510-ec93742f5c09) service to localhost/127.0.0.1:44185 2024-11-10T13:52:37,780 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:52:37,781 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/cluster_c5b26055-ab68-e52f-57e5-6cb60b7810be/data/data5/current/BP-1365254765-172.17.0.3-1731246746770 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:52:37,781 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/cluster_c5b26055-ab68-e52f-57e5-6cb60b7810be/data/data6/current/BP-1365254765-172.17.0.3-1731246746770 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:52:37,782 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:52:37,784 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@353955e9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:52:37,784 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11738cd8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:52:37,784 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:52:37,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40eb7053{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:52:37,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@510fec09{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/hadoop.log.dir/,STOPPED} 2024-11-10T13:52:37,786 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T13:52:37,786 WARN [BP-1365254765-172.17.0.3-1731246746770 heartbeating to localhost/127.0.0.1:44185 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:52:37,786 WARN [BP-1365254765-172.17.0.3-1731246746770 heartbeating to localhost/127.0.0.1:44185 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1365254765-172.17.0.3-1731246746770 (Datanode Uuid 80b895b3-c902-4d4f-91a8-f256e45210a9) service to localhost/127.0.0.1:44185 2024-11-10T13:52:37,786 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:52:37,786 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/cluster_c5b26055-ab68-e52f-57e5-6cb60b7810be/data/data3/current/BP-1365254765-172.17.0.3-1731246746770 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:52:37,787 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/cluster_c5b26055-ab68-e52f-57e5-6cb60b7810be/data/data4/current/BP-1365254765-172.17.0.3-1731246746770 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:52:37,787 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:52:37,789 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1b97a472{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:52:37,789 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3722a29b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:52:37,789 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:52:37,789 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69893329{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:52:37,789 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a5de9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/hadoop.log.dir/,STOPPED} 2024-11-10T13:52:37,790 WARN [BP-1365254765-172.17.0.3-1731246746770 heartbeating to localhost/127.0.0.1:44185 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:52:37,790 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T13:52:37,790 WARN [BP-1365254765-172.17.0.3-1731246746770 heartbeating to localhost/127.0.0.1:44185 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1365254765-172.17.0.3-1731246746770 (Datanode Uuid e21c19b5-e4bd-4d98-bd31-53ea3ca956cb) service to localhost/127.0.0.1:44185 2024-11-10T13:52:37,790 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:52:37,791 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/cluster_c5b26055-ab68-e52f-57e5-6cb60b7810be/data/data1/current/BP-1365254765-172.17.0.3-1731246746770 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:52:37,791 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/cluster_c5b26055-ab68-e52f-57e5-6cb60b7810be/data/data2/current/BP-1365254765-172.17.0.3-1731246746770 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:52:37,791 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:52:37,797 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62d6efd9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T13:52:37,798 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@353d35a1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:52:37,798 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:52:37,798 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ce709a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:52:37,798 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@760c69c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/hadoop.log.dir/,STOPPED} 2024-11-10T13:52:37,805 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-10T13:52:37,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-10T13:52:37,841 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=92 (was 162), OpenFileDescriptor=445 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=106 (was 64) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=7151 (was 7460) 2024-11-10T13:52:37,847 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=92, OpenFileDescriptor=445, MaxFileDescriptor=1048576, SystemLoadAverage=106, ProcessCount=11, AvailableMemoryMB=7151 2024-11-10T13:52:37,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-10T13:52:37,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/hadoop.log.dir so I do NOT create it in target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25 2024-11-10T13:52:37,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe934c0a-4a7e-6232-95bb-06f691156b84/hadoop.tmp.dir so I do NOT create it in target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25 2024-11-10T13:52:37,848 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/cluster_c9a2529b-8d95-60c1-07ef-f3ab1ab8f621, deleteOnExit=true 2024-11-10T13:52:37,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-10T13:52:37,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/test.cache.data in system properties and HBase conf 2024-11-10T13:52:37,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/hadoop.tmp.dir in system properties and HBase conf 2024-11-10T13:52:37,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/hadoop.log.dir in system properties and HBase conf 2024-11-10T13:52:37,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-10T13:52:37,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-10T13:52:37,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-10T13:52:37,849 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-10T13:52:37,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-10T13:52:37,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-10T13:52:37,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-10T13:52:37,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T13:52:37,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-10T13:52:37,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-10T13:52:37,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T13:52:37,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T13:52:37,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-10T13:52:37,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/nfs.dump.dir in system properties and HBase conf 2024-11-10T13:52:37,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/java.io.tmpdir in system properties and HBase conf 2024-11-10T13:52:37,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T13:52:37,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-10T13:52:37,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-10T13:52:38,223 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:52:38,228 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:52:38,229 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:52:38,229 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:52:38,229 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T13:52:38,230 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:52:38,231 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77ad49ec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:52:38,231 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43a7f4cb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:52:38,325 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7d1fffc5{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/java.io.tmpdir/jetty-localhost-45333-hadoop-hdfs-3_4_1-tests_jar-_-any-14037528091904210036/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T13:52:38,326 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7e69ce72{HTTP/1.1, (http/1.1)}{localhost:45333} 2024-11-10T13:52:38,326 INFO [Time-limited test {}] server.Server(415): Started @13333ms 2024-11-10T13:52:38,610 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:52:38,613 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:52:38,614 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:52:38,614 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:52:38,614 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T13:52:38,614 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@436188c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:52:38,615 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11812ea4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:52:38,707 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c5f5451{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/java.io.tmpdir/jetty-localhost-43541-hadoop-hdfs-3_4_1-tests_jar-_-any-6052728942351008823/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:52:38,708 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a8b9e38{HTTP/1.1, (http/1.1)}{localhost:43541} 2024-11-10T13:52:38,708 INFO [Time-limited test {}] server.Server(415): Started @13715ms 2024-11-10T13:52:38,709 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T13:52:38,741 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:52:38,744 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:52:38,746 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:52:38,746 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:52:38,746 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T13:52:38,747 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6ba59100{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:52:38,747 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6686fe53{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:52:38,840 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4cfd3d1f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/java.io.tmpdir/jetty-localhost-41379-hadoop-hdfs-3_4_1-tests_jar-_-any-2419275380940721810/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:52:38,840 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@d52b56f{HTTP/1.1, (http/1.1)}{localhost:41379} 2024-11-10T13:52:38,840 INFO [Time-limited test {}] server.Server(415): Started @13847ms 2024-11-10T13:52:38,841 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T13:52:38,868 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:52:38,871 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:52:38,872 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:52:38,872 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:52:38,872 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T13:52:38,873 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10a2b6be{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:52:38,873 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@190023f0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:52:38,968 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@47c8a2ee{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/java.io.tmpdir/jetty-localhost-40401-hadoop-hdfs-3_4_1-tests_jar-_-any-6199592096356228822/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:52:38,968 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@323fafe0{HTTP/1.1, (http/1.1)}{localhost:40401} 2024-11-10T13:52:38,968 INFO [Time-limited test {}] server.Server(415): Started @13976ms 2024-11-10T13:52:38,970 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T13:52:39,917 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-10T13:52:39,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:52:39,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:52:39,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:52:39,982 WARN [Thread-566 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/cluster_c9a2529b-8d95-60c1-07ef-f3ab1ab8f621/data/data1/current/BP-1551880796-172.17.0.3-1731246757876/current, will proceed with Du for space computation calculation, 2024-11-10T13:52:39,983 WARN [Thread-567 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/cluster_c9a2529b-8d95-60c1-07ef-f3ab1ab8f621/data/data2/current/BP-1551880796-172.17.0.3-1731246757876/current, will proceed with Du for space computation calculation, 2024-11-10T13:52:39,999 WARN [Thread-505 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T13:52:40,002 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3f76616aa3380497 with lease ID 0xba510508cf3c580c: Processing first storage report for DS-28ae725f-cc41-4fd9-9b96-78166b80b6e1 from datanode DatanodeRegistration(127.0.0.1:35811, datanodeUuid=514e5816-9871-4e02-a4d7-635e19b3d895, infoPort=43871, infoSecurePort=0, ipcPort=43315, storageInfo=lv=-57;cid=testClusterID;nsid=1706344471;c=1731246757876) 2024-11-10T13:52:40,002 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3f76616aa3380497 with lease ID 0xba510508cf3c580c: from storage DS-28ae725f-cc41-4fd9-9b96-78166b80b6e1 node DatanodeRegistration(127.0.0.1:35811, datanodeUuid=514e5816-9871-4e02-a4d7-635e19b3d895, infoPort=43871, infoSecurePort=0, ipcPort=43315, storageInfo=lv=-57;cid=testClusterID;nsid=1706344471;c=1731246757876), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:52:40,002 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3f76616aa3380497 with lease ID 0xba510508cf3c580c: Processing first storage report for DS-6068984e-e2fa-4a54-a08f-c8f70a99f01c from datanode DatanodeRegistration(127.0.0.1:35811, datanodeUuid=514e5816-9871-4e02-a4d7-635e19b3d895, infoPort=43871, infoSecurePort=0, ipcPort=43315, storageInfo=lv=-57;cid=testClusterID;nsid=1706344471;c=1731246757876) 2024-11-10T13:52:40,002 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3f76616aa3380497 with lease ID 0xba510508cf3c580c: from storage DS-6068984e-e2fa-4a54-a08f-c8f70a99f01c node DatanodeRegistration(127.0.0.1:35811, datanodeUuid=514e5816-9871-4e02-a4d7-635e19b3d895, infoPort=43871, infoSecurePort=0, ipcPort=43315, storageInfo=lv=-57;cid=testClusterID;nsid=1706344471;c=1731246757876), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T13:52:40,229 WARN [Thread-577 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/cluster_c9a2529b-8d95-60c1-07ef-f3ab1ab8f621/data/data3/current/BP-1551880796-172.17.0.3-1731246757876/current, will proceed with Du for space computation calculation, 2024-11-10T13:52:40,229 WARN [Thread-578 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/cluster_c9a2529b-8d95-60c1-07ef-f3ab1ab8f621/data/data4/current/BP-1551880796-172.17.0.3-1731246757876/current, will proceed with Du for space computation calculation, 2024-11-10T13:52:40,248 WARN [Thread-528 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T13:52:40,250 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6d48ebc557aa722d with lease ID 0xba510508cf3c580d: Processing first storage report for DS-626f5840-f682-4760-acef-0757822018f2 from datanode DatanodeRegistration(127.0.0.1:36097, datanodeUuid=0a978caf-0fe4-44fe-a89c-628be87127cb, infoPort=34561, infoSecurePort=0, ipcPort=40487, storageInfo=lv=-57;cid=testClusterID;nsid=1706344471;c=1731246757876) 2024-11-10T13:52:40,251 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6d48ebc557aa722d with lease ID 0xba510508cf3c580d: from storage DS-626f5840-f682-4760-acef-0757822018f2 node DatanodeRegistration(127.0.0.1:36097, datanodeUuid=0a978caf-0fe4-44fe-a89c-628be87127cb, infoPort=34561, infoSecurePort=0, ipcPort=40487, storageInfo=lv=-57;cid=testClusterID;nsid=1706344471;c=1731246757876), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:52:40,251 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6d48ebc557aa722d with lease ID 0xba510508cf3c580d: Processing first storage report for DS-47cd3177-776a-4971-8feb-0003a6db9496 from datanode DatanodeRegistration(127.0.0.1:36097, datanodeUuid=0a978caf-0fe4-44fe-a89c-628be87127cb, infoPort=34561, infoSecurePort=0, ipcPort=40487, storageInfo=lv=-57;cid=testClusterID;nsid=1706344471;c=1731246757876) 2024-11-10T13:52:40,251 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6d48ebc557aa722d with lease ID 0xba510508cf3c580d: from storage DS-47cd3177-776a-4971-8feb-0003a6db9496 node DatanodeRegistration(127.0.0.1:36097, datanodeUuid=0a978caf-0fe4-44fe-a89c-628be87127cb, infoPort=34561, infoSecurePort=0, ipcPort=40487, storageInfo=lv=-57;cid=testClusterID;nsid=1706344471;c=1731246757876), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:52:40,327 WARN [Thread-588 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/cluster_c9a2529b-8d95-60c1-07ef-f3ab1ab8f621/data/data5/current/BP-1551880796-172.17.0.3-1731246757876/current, will proceed with Du for space computation calculation, 2024-11-10T13:52:40,327 WARN [Thread-589 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/cluster_c9a2529b-8d95-60c1-07ef-f3ab1ab8f621/data/data6/current/BP-1551880796-172.17.0.3-1731246757876/current, will proceed with Du for space computation calculation, 2024-11-10T13:52:40,345 WARN [Thread-550 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T13:52:40,348 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc11ae8331afda338 with lease ID 0xba510508cf3c580e: Processing first storage report for DS-7e2cef11-08df-49c2-b4d7-709a0271301d from datanode DatanodeRegistration(127.0.0.1:41263, datanodeUuid=4bb7a198-2c96-4eda-bb4f-2a2fd3c8e571, infoPort=37481, infoSecurePort=0, ipcPort=34563, storageInfo=lv=-57;cid=testClusterID;nsid=1706344471;c=1731246757876) 2024-11-10T13:52:40,348 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc11ae8331afda338 with lease ID 0xba510508cf3c580e: from storage DS-7e2cef11-08df-49c2-b4d7-709a0271301d node DatanodeRegistration(127.0.0.1:41263, datanodeUuid=4bb7a198-2c96-4eda-bb4f-2a2fd3c8e571, infoPort=37481, infoSecurePort=0, ipcPort=34563, storageInfo=lv=-57;cid=testClusterID;nsid=1706344471;c=1731246757876), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:52:40,348 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc11ae8331afda338 with lease ID 0xba510508cf3c580e: Processing first storage report for DS-e57c53a7-a1bc-47df-b98d-2a3506a0041f from datanode DatanodeRegistration(127.0.0.1:41263, datanodeUuid=4bb7a198-2c96-4eda-bb4f-2a2fd3c8e571, infoPort=37481, infoSecurePort=0, ipcPort=34563, storageInfo=lv=-57;cid=testClusterID;nsid=1706344471;c=1731246757876) 2024-11-10T13:52:40,348 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc11ae8331afda338 with lease ID 0xba510508cf3c580e: from storage DS-e57c53a7-a1bc-47df-b98d-2a3506a0041f node DatanodeRegistration(127.0.0.1:41263, datanodeUuid=4bb7a198-2c96-4eda-bb4f-2a2fd3c8e571, infoPort=37481, infoSecurePort=0, ipcPort=34563, storageInfo=lv=-57;cid=testClusterID;nsid=1706344471;c=1731246757876), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T13:52:40,420 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25 2024-11-10T13:52:40,424 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/cluster_c9a2529b-8d95-60c1-07ef-f3ab1ab8f621/zookeeper_0, clientPort=52921, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/cluster_c9a2529b-8d95-60c1-07ef-f3ab1ab8f621/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/cluster_c9a2529b-8d95-60c1-07ef-f3ab1ab8f621/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-10T13:52:40,425 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52921 2024-11-10T13:52:40,425 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:40,428 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:40,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741825_1001 (size=7) 2024-11-10T13:52:40,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741825_1001 (size=7) 2024-11-10T13:52:40,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741825_1001 (size=7) 2024-11-10T13:52:40,443 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1 with version=8 2024-11-10T13:52:40,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44185/user/jenkins/test-data/eb7523a4-1971-d33b-0f53-0209dd64f2b7/hbase-staging 2024-11-10T13:52:40,446 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2467a7071e00:0 server-side Connection retries=45 2024-11-10T13:52:40,447 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:40,447 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:40,447 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:52:40,447 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:40,447 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:52:40,447 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-10T13:52:40,447 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:52:40,448 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40991 2024-11-10T13:52:40,451 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40991 connecting to ZooKeeper ensemble=127.0.0.1:52921 2024-11-10T13:52:40,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:409910x0, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:52:40,511 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40991-0x10125051d920000 connected 2024-11-10T13:52:40,609 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do 
block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:40,615 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:40,617 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:52:40,618 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1, hbase.cluster.distributed=false 2024-11-10T13:52:40,620 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:52:40,620 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40991 2024-11-10T13:52:40,620 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40991 2024-11-10T13:52:40,620 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40991 2024-11-10T13:52:40,628 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40991 2024-11-10T13:52:40,628 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40991 2024-11-10T13:52:40,644 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2467a7071e00:0 server-side Connection retries=45 2024-11-10T13:52:40,644 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:40,644 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:40,645 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:52:40,645 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:40,645 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:52:40,645 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T13:52:40,645 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:52:40,646 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43963 2024-11-10T13:52:40,647 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43963 connecting to ZooKeeper ensemble=127.0.0.1:52921 2024-11-10T13:52:40,648 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:40,650 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:40,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:439630x0, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:52:40,661 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43963-0x10125051d920001 connected 2024-11-10T13:52:40,661 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:52:40,661 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T13:52:40,662 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T13:52:40,663 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T13:52:40,664 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:52:40,665 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43963 2024-11-10T13:52:40,665 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43963 2024-11-10T13:52:40,666 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43963 2024-11-10T13:52:40,666 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43963 2024-11-10T13:52:40,666 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43963 2024-11-10T13:52:40,683 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2467a7071e00:0 server-side Connection retries=45 2024-11-10T13:52:40,684 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:40,684 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:40,684 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:52:40,684 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:40,684 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:52:40,684 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T13:52:40,684 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:52:40,685 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33817 2024-11-10T13:52:40,686 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33817 connecting to ZooKeeper ensemble=127.0.0.1:52921 2024-11-10T13:52:40,686 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:40,688 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:40,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:338170x0, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:52:40,703 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33817-0x10125051d920002 connected 2024-11-10T13:52:40,703 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:52:40,704 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T13:52:40,704 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T13:52:40,705 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T13:52:40,707 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:52:40,709 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33817 2024-11-10T13:52:40,710 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33817 2024-11-10T13:52:40,710 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33817 2024-11-10T13:52:40,710 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33817 2024-11-10T13:52:40,711 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33817 2024-11-10T13:52:40,727 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2467a7071e00:0 server-side Connection retries=45 2024-11-10T13:52:40,727 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:40,727 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:40,727 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:52:40,727 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:52:40,727 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:52:40,727 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T13:52:40,727 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:52:40,728 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38731 2024-11-10T13:52:40,730 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38731 connecting to ZooKeeper ensemble=127.0.0.1:52921 2024-11-10T13:52:40,730 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:40,732 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:40,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:387310x0, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:52:40,745 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38731-0x10125051d920003 connected 2024-11-10T13:52:40,745 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:52:40,746 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T13:52:40,746 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T13:52:40,747 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T13:52:40,749 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:52:40,750 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38731 2024-11-10T13:52:40,750 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38731 2024-11-10T13:52:40,750 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38731 2024-11-10T13:52:40,751 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38731 2024-11-10T13:52:40,751 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38731 2024-11-10T13:52:40,767 DEBUG [M:0;2467a7071e00:40991 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2467a7071e00:40991 2024-11-10T13:52:40,768 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2467a7071e00,40991,1731246760446 2024-11-10T13:52:40,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:52:40,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:52:40,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:52:40,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:52:40,777 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2467a7071e00,40991,1731246760446 2024-11-10T13:52:40,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T13:52:40,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T13:52:40,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T13:52:40,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:40,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:40,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:40,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:40,788 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T13:52:40,788 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2467a7071e00,40991,1731246760446 from backup master directory 2024-11-10T13:52:40,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:52:40,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2467a7071e00,40991,1731246760446 2024-11-10T13:52:40,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:52:40,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:52:40,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:52:40,797 WARN [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
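At this point the master 2467a7071e00,40991,1731246760446 has registered itself under /hbase and the region servers hold watchers on /hbase/master, all against the ZooKeeper ensemble at 127.0.0.1:52921. Below is a minimal client-side sketch of confirming the active master through the public connection API; it is not part of the test itself, only the quorum address and client port are taken from the log, and the configuration keys are the standard HBase client settings.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ActiveMasterCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Point at the test ensemble from this run (the client port 52921 is specific to this log).
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 52921);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ServerName master = admin.getClusterMetrics().getMasterName();
          System.out.println("Active master: " + master);  // e.g. 2467a7071e00,40991,1731246760446
        }
      }
    }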
2024-11-10T13:52:40,797 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2467a7071e00,40991,1731246760446 2024-11-10T13:52:40,803 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/hbase.id] with ID: 091205a3-36eb-4187-afd9-d6adf4851a4d 2024-11-10T13:52:40,803 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/.tmp/hbase.id 2024-11-10T13:52:40,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741826_1002 (size=42) 2024-11-10T13:52:40,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741826_1002 (size=42) 2024-11-10T13:52:40,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741826_1002 (size=42) 2024-11-10T13:52:40,813 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/.tmp/hbase.id]:[hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/hbase.id] 2024-11-10T13:52:40,831 INFO [master/2467a7071e00:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:52:40,831 INFO [master/2467a7071e00:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-10T13:52:40,833 INFO [master/2467a7071e00:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
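The FSUtils records above show the cluster ID being written to .tmp/hbase.id first and then moved to hbase.id. The sketch below only illustrates that write-temporary-then-rename pattern with the plain Hadoop FileSystem API; the NameNode address and paths are copied from the log, while the helper class and the file contents are illustrative (the real file written by FSUtils has its own format).

    import java.net.URI;
    import java.nio.charset.StandardCharsets;
    import java.util.UUID;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address and root directory taken from this run's log.
        FileSystem fs = FileSystem.get(new URI("hdfs://localhost:39813"), conf);
        Path root = new Path("/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1");
        Path tmp = new Path(root, ".tmp/hbase.id");
        Path target = new Path(root, "hbase.id");

        // Write the ID to a temporary file first...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
        }
        // ...then move it into place.
        fs.rename(tmp, target);
      }
    }

Because a rename within a single HDFS filesystem is atomic for a file, readers of hbase.id never observe a partially written ID, which is the point of the two-step sequence logged here.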
2024-11-10T13:52:40,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:40,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:40,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:40,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:40,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741827_1003 (size=196) 2024-11-10T13:52:40,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741827_1003 (size=196) 2024-11-10T13:52:40,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741827_1003 (size=196) 2024-11-10T13:52:40,850 INFO [master/2467a7071e00:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T13:52:40,851 INFO [master/2467a7071e00:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T13:52:40,851 INFO [master/2467a7071e00:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:52:40,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is 
added to blk_1073741828_1004 (size=1189) 2024-11-10T13:52:40,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741828_1004 (size=1189) 2024-11-10T13:52:40,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741828_1004 (size=1189) 2024-11-10T13:52:40,867 INFO [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/data/master/store 2024-11-10T13:52:40,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741829_1005 (size=34) 2024-11-10T13:52:40,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741829_1005 (size=34) 2024-11-10T13:52:40,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741829_1005 (size=34) 2024-11-10T13:52:40,877 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:52:40,877 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T13:52:40,877 INFO [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:52:40,877 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
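The long descriptor dumps above are the master:store table descriptor: an 'info' family with ROW_INDEX_V1 encoding, a ROWCOL bloom filter, IN_MEMORY and an 8 KB block size, plus 'proc', 'rs' and 'state' with plainer settings. For orientation, the sketch below builds comparable settings with the public client API for a hypothetical table; it is not the internal MasterRegion bootstrap code.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
        public static TableDescriptor build() {
            // Mirrors the 'info' family in the dump: 3 versions, ROW_INDEX_V1 encoding,
            // ROWCOL bloom filter, in-memory, 8 KB blocks.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .build();
            // The other families in the dump keep defaults: 1 version, ROW bloom, 64 KB blocks.
            ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("proc"))
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)
                .build();
            // "example:store" is a hypothetical table name for illustration only.
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example", "store"))
                .setColumnFamily(info)
                .setColumnFamily(proc)
                .build();
        }
    }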
2024-11-10T13:52:40,877 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T13:52:40,877 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:52:40,877 INFO [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:52:40,878 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731246760877Disabling compacts and flushes for region at 1731246760877Disabling writes for close at 1731246760877Writing region close event to WAL at 1731246760877Closed at 1731246760877 2024-11-10T13:52:40,878 WARN [master/2467a7071e00:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/data/master/store/.initializing 2024-11-10T13:52:40,879 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/WALs/2467a7071e00,40991,1731246760446 2024-11-10T13:52:40,883 INFO [master/2467a7071e00:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2467a7071e00%2C40991%2C1731246760446, suffix=, logDir=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/WALs/2467a7071e00,40991,1731246760446, archiveDir=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/oldWALs, maxLogs=10 2024-11-10T13:52:40,884 INFO [master/2467a7071e00:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2467a7071e00%2C40991%2C1731246760446.1731246760883 2024-11-10T13:52:40,894 INFO [master/2467a7071e00:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/WALs/2467a7071e00,40991,1731246760446/2467a7071e00%2C40991%2C1731246760446.1731246760883 2024-11-10T13:52:40,896 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34561:34561),(127.0.0.1/127.0.0.1:37481:37481),(127.0.0.1/127.0.0.1:43871:43871)] 2024-11-10T13:52:40,897 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:52:40,897 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:52:40,897 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:40,897 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:40,900 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:40,902 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-10T13:52:40,902 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:40,903 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:52:40,903 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:40,905 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-10T13:52:40,905 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:40,905 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:52:40,905 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:40,908 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-10T13:52:40,908 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:40,909 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:52:40,909 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:40,911 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-10T13:52:40,911 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:40,912 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:52:40,912 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:40,913 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:40,913 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:40,915 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:40,915 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:40,915 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T13:52:40,917 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:52:40,919 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:52:40,920 INFO [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64464608, jitterRate=-0.03940248489379883}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T13:52:40,920 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731246760897Initializing all the Stores at 1731246760899 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731246760899Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731246760900 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731246760900Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731246760900Cleaning up temporary data from old regions at 1731246760915 (+15 ms)Region opened successfully at 1731246760920 (+5 ms) 2024-11-10T13:52:40,921 INFO [master/2467a7071e00:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-10T13:52:40,926 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ea10a4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2467a7071e00/172.17.0.3:0 2024-11-10T13:52:40,927 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-10T13:52:40,927 INFO [master/2467a7071e00:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-10T13:52:40,927 INFO [master/2467a7071e00:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-10T13:52:40,927 INFO [master/2467a7071e00:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-10T13:52:40,928 INFO [master/2467a7071e00:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-10T13:52:40,929 INFO [master/2467a7071e00:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-10T13:52:40,929 INFO [master/2467a7071e00:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-10T13:52:40,935 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-10T13:52:40,936 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-10T13:52:40,944 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-10T13:52:40,945 INFO [master/2467a7071e00:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-10T13:52:40,946 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-10T13:52:40,955 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-10T13:52:40,955 INFO [master/2467a7071e00:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-10T13:52:40,957 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-10T13:52:40,965 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-10T13:52:40,967 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-10T13:52:40,976 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-10T13:52:40,978 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-10T13:52:40,986 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-10T13:52:40,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T13:52:40,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T13:52:40,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T13:52:40,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-11-10T13:52:40,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:40,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:40,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:40,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:40,998 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2467a7071e00,40991,1731246760446, sessionid=0x10125051d920000, setting cluster-up flag (Was=false) 2024-11-10T13:52:41,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:41,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:41,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:41,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:41,050 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-10T13:52:41,051 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2467a7071e00,40991,1731246760446 2024-11-10T13:52:41,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:41,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:41,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:41,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:41,102 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-10T13:52:41,104 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2467a7071e00,40991,1731246760446 2024-11-10T13:52:41,106 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-10T13:52:41,109 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-10T13:52:41,109 INFO [master/2467a7071e00:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-10T13:52:41,109 INFO [master/2467a7071e00:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
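The balancer line above records the knobs StochasticLoadBalancer loaded (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false) plus slop=0.2 from BaseLoadBalancer. To pin these values in a test you would normally set them on the Configuration; the sketch below uses the property names I believe the balancer reads, so treat them as assumptions to verify against your hbase-default.xml.

    import org.apache.hadoop.conf.Configuration;

    public class BalancerConfSketch {
        public static Configuration tuned() {
            Configuration conf = new Configuration();
            // Values below are the ones reported in the log line above; the property
            // names are assumptions to double-check against your HBase version.
            conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);
            conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
            conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
            conf.setFloat("hbase.regions.slop", 0.2f);   // slop=0.2 from BaseLoadBalancer(416)
            return conf;
        }
    }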
2024-11-10T13:52:41,110 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2467a7071e00,40991,1731246760446 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-10T13:52:41,112 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2467a7071e00:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:52:41,112 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2467a7071e00:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:52:41,112 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2467a7071e00:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:52:41,112 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2467a7071e00:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:52:41,112 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2467a7071e00:0, corePoolSize=10, maxPoolSize=10 2024-11-10T13:52:41,112 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,112 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2467a7071e00:0, corePoolSize=2, maxPoolSize=2 2024-11-10T13:52:41,113 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,115 INFO [master/2467a7071e00:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731246791115 2024-11-10T13:52:41,115 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-10T13:52:41,115 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-10T13:52:41,115 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-10T13:52:41,115 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-10T13:52:41,115 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-10T13:52:41,115 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-10T13:52:41,116 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,116 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-10T13:52:41,116 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-10T13:52:41,116 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-10T13:52:41,116 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:52:41,117 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-10T13:52:41,117 INFO [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-10T13:52:41,117 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-10T13:52:41,117 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2467a7071e00:0:becomeActiveMaster-HFileCleaner.large.0-1731246761117,5,FailOnTimeoutGroup] 2024-11-10T13:52:41,117 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2467a7071e00:0:becomeActiveMaster-HFileCleaner.small.0-1731246761117,5,FailOnTimeoutGroup] 2024-11-10T13:52:41,117 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,117 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-10T13:52:41,117 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,117 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
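Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" line above is the ChoreService accepting a periodic task. The sketch below shows the rough shape of such a chore; it assumes the ScheduledChore(name, stopper, period) constructor and ChoreService.scheduleChore as I recall them, and the cleaner body is a stand-in, not the real TimeToLiveLogCleaner logic.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
        public static void main(String[] args) {
            Stoppable stopper = new Stoppable() {
                private volatile boolean stopped;
                @Override public void stop(String why) { stopped = true; }
                @Override public boolean isStopped() { return stopped; }
            };
            // period=600000 ms mirrors the LogsCleaner / HFileCleaner chores above.
            ScheduledChore logsCleaner = new ScheduledChore("LogsCleaner", stopper, 600_000) {
                @Override protected void chore() {
                    // Stand-in for the real cleaner delegates (TimeToLiveLogCleaner, ...).
                    System.out.println("scanning oldWALs for deletable files");
                }
            };
            ChoreService choreService = new ChoreService("sketch");
            choreService.scheduleChore(logsCleaner);
        }
    }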
2024-11-10T13:52:41,118 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:41,118 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-10T13:52:41,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741831_1007 (size=1321) 2024-11-10T13:52:41,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741831_1007 (size=1321) 2024-11-10T13:52:41,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741831_1007 (size=1321) 2024-11-10T13:52:41,129 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-10T13:52:41,129 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1 2024-11-10T13:52:41,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741832_1008 (size=32) 2024-11-10T13:52:41,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741832_1008 (size=32) 2024-11-10T13:52:41,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741832_1008 (size=32) 2024-11-10T13:52:41,142 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:52:41,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T13:52:41,146 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T13:52:41,146 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:41,147 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:52:41,147 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T13:52:41,148 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T13:52:41,148 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:41,149 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:52:41,149 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T13:52:41,150 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T13:52:41,150 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:41,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:52:41,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T13:52:41,153 INFO [RS:2;2467a7071e00:38731 {}] regionserver.HRegionServer(746): ClusterId : 091205a3-36eb-4187-afd9-d6adf4851a4d 2024-11-10T13:52:41,153 INFO [RS:1;2467a7071e00:33817 {}] regionserver.HRegionServer(746): ClusterId : 091205a3-36eb-4187-afd9-d6adf4851a4d 2024-11-10T13:52:41,153 INFO [RS:0;2467a7071e00:43963 {}] regionserver.HRegionServer(746): ClusterId : 091205a3-36eb-4187-afd9-d6adf4851a4d 2024-11-10T13:52:41,153 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T13:52:41,153 DEBUG [RS:2;2467a7071e00:38731 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T13:52:41,153 DEBUG [RS:1;2467a7071e00:33817 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T13:52:41,153 DEBUG [RS:0;2467a7071e00:43963 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T13:52:41,153 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:41,154 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:52:41,154 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T13:52:41,155 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740 2024-11-10T13:52:41,156 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740 2024-11-10T13:52:41,157 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T13:52:41,157 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T13:52:41,158 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
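The "(32.0 M)" fallback above is straightforward arithmetic: hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on this table, so FlushLargeStoresPolicy divides the region's memstore flush size — the HBase default of 134217728 bytes (128 MB) — by hbase:meta's four column families (info, ns, rep_barrier, table): 134217728 / 4 = 33554432 bytes, i.e. 32 MB. That is exactly the flushSizeLowerBound=33554432 reported a few lines later when region 1588230740 opens, and the same figure seen earlier for the four-family master:store region.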
2024-11-10T13:52:41,160 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T13:52:41,163 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:52:41,163 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74307610, jitterRate=0.10726967453956604}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T13:52:41,164 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731246761143Initializing all the Stores at 1731246761144 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731246761144Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731246761144Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731246761144Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731246761144Cleaning up temporary data from old regions at 1731246761157 (+13 ms)Region opened successfully at 1731246761164 (+7 ms) 2024-11-10T13:52:41,164 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T13:52:41,165 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T13:52:41,165 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T13:52:41,165 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T13:52:41,165 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T13:52:41,165 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T13:52:41,165 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731246761164Disabling compacts and flushes for region at 1731246761164Disabling writes for close at 1731246761165 (+1 
ms)Writing region close event to WAL at 1731246761165Closed at 1731246761165 2024-11-10T13:52:41,167 DEBUG [RS:2;2467a7071e00:38731 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T13:52:41,167 DEBUG [RS:1;2467a7071e00:33817 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T13:52:41,167 DEBUG [RS:0;2467a7071e00:43963 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T13:52:41,167 DEBUG [RS:2;2467a7071e00:38731 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T13:52:41,167 DEBUG [RS:0;2467a7071e00:43963 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T13:52:41,167 DEBUG [RS:1;2467a7071e00:33817 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T13:52:41,167 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:52:41,167 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-10T13:52:41,167 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-10T13:52:41,169 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T13:52:41,170 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-10T13:52:41,177 DEBUG [RS:1;2467a7071e00:33817 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T13:52:41,177 DEBUG [RS:2;2467a7071e00:38731 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T13:52:41,177 DEBUG [RS:0;2467a7071e00:43963 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T13:52:41,177 DEBUG [RS:1;2467a7071e00:33817 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59237c84, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2467a7071e00/172.17.0.3:0 2024-11-10T13:52:41,177 DEBUG [RS:0;2467a7071e00:43963 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ef5930, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2467a7071e00/172.17.0.3:0 2024-11-10T13:52:41,177 DEBUG [RS:2;2467a7071e00:38731 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5993eca8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=2467a7071e00/172.17.0.3:0 2024-11-10T13:52:41,188 DEBUG [RS:1;2467a7071e00:33817 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;2467a7071e00:33817 2024-11-10T13:52:41,188 INFO [RS:1;2467a7071e00:33817 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T13:52:41,188 INFO [RS:1;2467a7071e00:33817 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T13:52:41,188 DEBUG [RS:1;2467a7071e00:33817 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-10T13:52:41,189 INFO [RS:1;2467a7071e00:33817 {}] regionserver.HRegionServer(2659): reportForDuty to master=2467a7071e00,40991,1731246760446 with port=33817, startcode=1731246760683 2024-11-10T13:52:41,189 DEBUG [RS:1;2467a7071e00:33817 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T13:52:41,191 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52599, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T13:52:41,191 DEBUG [RS:2;2467a7071e00:38731 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;2467a7071e00:38731 2024-11-10T13:52:41,191 INFO [RS:2;2467a7071e00:38731 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T13:52:41,192 INFO [RS:2;2467a7071e00:38731 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T13:52:41,192 DEBUG [RS:2;2467a7071e00:38731 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-10T13:52:41,192 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40991 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2467a7071e00,33817,1731246760683 2024-11-10T13:52:41,192 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40991 {}] master.ServerManager(517): Registering regionserver=2467a7071e00,33817,1731246760683 2024-11-10T13:52:41,193 INFO [RS:2;2467a7071e00:38731 {}] regionserver.HRegionServer(2659): reportForDuty to master=2467a7071e00,40991,1731246760446 with port=38731, startcode=1731246760726 2024-11-10T13:52:41,193 DEBUG [RS:2;2467a7071e00:38731 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T13:52:41,195 DEBUG [RS:1;2467a7071e00:33817 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1 2024-11-10T13:52:41,195 DEBUG [RS:1;2467a7071e00:33817 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39813 2024-11-10T13:52:41,195 DEBUG [RS:1;2467a7071e00:33817 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T13:52:41,195 DEBUG [RS:0;2467a7071e00:43963 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2467a7071e00:43963 2024-11-10T13:52:41,195 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45339, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T13:52:41,195 INFO [RS:0;2467a7071e00:43963 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T13:52:41,195 INFO 
[RS:0;2467a7071e00:43963 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T13:52:41,195 DEBUG [RS:0;2467a7071e00:43963 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-10T13:52:41,195 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40991 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2467a7071e00,38731,1731246760726 2024-11-10T13:52:41,196 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40991 {}] master.ServerManager(517): Registering regionserver=2467a7071e00,38731,1731246760726 2024-11-10T13:52:41,196 INFO [RS:0;2467a7071e00:43963 {}] regionserver.HRegionServer(2659): reportForDuty to master=2467a7071e00,40991,1731246760446 with port=43963, startcode=1731246760644 2024-11-10T13:52:41,196 DEBUG [RS:0;2467a7071e00:43963 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T13:52:41,198 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33391, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T13:52:41,198 DEBUG [RS:2;2467a7071e00:38731 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1 2024-11-10T13:52:41,198 DEBUG [RS:2;2467a7071e00:38731 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39813 2024-11-10T13:52:41,198 DEBUG [RS:2;2467a7071e00:38731 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T13:52:41,198 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40991 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2467a7071e00,43963,1731246760644 2024-11-10T13:52:41,198 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40991 {}] master.ServerManager(517): Registering regionserver=2467a7071e00,43963,1731246760644 2024-11-10T13:52:41,200 DEBUG [RS:0;2467a7071e00:43963 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1 2024-11-10T13:52:41,200 DEBUG [RS:0;2467a7071e00:43963 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39813 2024-11-10T13:52:41,200 DEBUG [RS:0;2467a7071e00:43963 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T13:52:41,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:52:41,250 DEBUG [RS:1;2467a7071e00:33817 {}] zookeeper.ZKUtil(111): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2467a7071e00,33817,1731246760683 2024-11-10T13:52:41,251 WARN [RS:1;2467a7071e00:33817 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-10T13:52:41,251 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2467a7071e00,38731,1731246760726] 2024-11-10T13:52:41,251 INFO [RS:1;2467a7071e00:33817 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:52:41,251 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2467a7071e00,43963,1731246760644] 2024-11-10T13:52:41,251 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2467a7071e00,33817,1731246760683] 2024-11-10T13:52:41,251 DEBUG [RS:1;2467a7071e00:33817 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/WALs/2467a7071e00,33817,1731246760683 2024-11-10T13:52:41,251 DEBUG [RS:2;2467a7071e00:38731 {}] zookeeper.ZKUtil(111): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2467a7071e00,38731,1731246760726 2024-11-10T13:52:41,251 DEBUG [RS:0;2467a7071e00:43963 {}] zookeeper.ZKUtil(111): regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2467a7071e00,43963,1731246760644 2024-11-10T13:52:41,251 WARN [RS:2;2467a7071e00:38731 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T13:52:41,251 WARN [RS:0;2467a7071e00:43963 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T13:52:41,252 INFO [RS:0;2467a7071e00:43963 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:52:41,252 INFO [RS:2;2467a7071e00:38731 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:52:41,252 DEBUG [RS:0;2467a7071e00:43963 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/WALs/2467a7071e00,43963,1731246760644 2024-11-10T13:52:41,252 DEBUG [RS:2;2467a7071e00:38731 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/WALs/2467a7071e00,38731,1731246760726 2024-11-10T13:52:41,259 INFO [RS:0;2467a7071e00:43963 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T13:52:41,259 INFO [RS:1;2467a7071e00:33817 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T13:52:41,259 INFO [RS:2;2467a7071e00:38731 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T13:52:41,261 INFO [RS:0;2467a7071e00:43963 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T13:52:41,261 INFO [RS:0;2467a7071e00:43963 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T13:52:41,262 INFO [RS:0;2467a7071e00:43963 {}] hbase.ChoreService(168): Chore ScheduledChore 
name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,262 INFO [RS:0;2467a7071e00:43963 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T13:52:41,263 INFO [RS:0;2467a7071e00:43963 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T13:52:41,263 INFO [RS:0;2467a7071e00:43963 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,263 DEBUG [RS:0;2467a7071e00:43963 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,263 DEBUG [RS:0;2467a7071e00:43963 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,263 DEBUG [RS:0;2467a7071e00:43963 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,263 DEBUG [RS:0;2467a7071e00:43963 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,263 DEBUG [RS:0;2467a7071e00:43963 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,264 DEBUG [RS:0;2467a7071e00:43963 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2467a7071e00:0, corePoolSize=2, maxPoolSize=2 2024-11-10T13:52:41,264 DEBUG [RS:0;2467a7071e00:43963 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,264 DEBUG [RS:0;2467a7071e00:43963 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,264 DEBUG [RS:0;2467a7071e00:43963 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,264 DEBUG [RS:0;2467a7071e00:43963 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,264 DEBUG [RS:0;2467a7071e00:43963 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,264 DEBUG [RS:0;2467a7071e00:43963 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,264 DEBUG [RS:0;2467a7071e00:43963 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2467a7071e00:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:52:41,264 DEBUG [RS:0;2467a7071e00:43963 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:52:41,268 INFO [RS:1;2467a7071e00:33817 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T13:52:41,268 INFO 
[RS:2;2467a7071e00:38731 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T13:52:41,269 INFO [RS:1;2467a7071e00:33817 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T13:52:41,269 INFO [RS:1;2467a7071e00:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,269 INFO [RS:0;2467a7071e00:43963 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,269 INFO [RS:2;2467a7071e00:38731 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T13:52:41,269 INFO [RS:2;2467a7071e00:38731 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,269 INFO [RS:0;2467a7071e00:43963 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,269 INFO [RS:0;2467a7071e00:43963 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,269 INFO [RS:1;2467a7071e00:33817 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T13:52:41,269 INFO [RS:0;2467a7071e00:43963 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,269 INFO [RS:2;2467a7071e00:38731 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T13:52:41,269 INFO [RS:0;2467a7071e00:43963 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,269 INFO [RS:0;2467a7071e00:43963 {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,43963,1731246760644-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T13:52:41,270 INFO [RS:2;2467a7071e00:38731 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T13:52:41,270 INFO [RS:2;2467a7071e00:38731 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-10T13:52:41,270 DEBUG [RS:2;2467a7071e00:38731 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,270 DEBUG [RS:2;2467a7071e00:38731 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,270 DEBUG [RS:2;2467a7071e00:38731 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,270 DEBUG [RS:2;2467a7071e00:38731 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,271 DEBUG [RS:2;2467a7071e00:38731 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,271 DEBUG [RS:2;2467a7071e00:38731 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2467a7071e00:0, corePoolSize=2, maxPoolSize=2 2024-11-10T13:52:41,271 DEBUG [RS:2;2467a7071e00:38731 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,271 DEBUG [RS:2;2467a7071e00:38731 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,271 DEBUG [RS:2;2467a7071e00:38731 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,271 DEBUG [RS:2;2467a7071e00:38731 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,271 DEBUG [RS:2;2467a7071e00:38731 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,271 DEBUG [RS:2;2467a7071e00:38731 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,271 DEBUG [RS:2;2467a7071e00:38731 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2467a7071e00:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:52:41,271 INFO [RS:1;2467a7071e00:33817 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T13:52:41,271 DEBUG [RS:2;2467a7071e00:38731 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:52:41,271 INFO [RS:1;2467a7071e00:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-10T13:52:41,271 DEBUG [RS:1;2467a7071e00:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,272 DEBUG [RS:1;2467a7071e00:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,272 DEBUG [RS:1;2467a7071e00:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,272 DEBUG [RS:1;2467a7071e00:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,272 DEBUG [RS:1;2467a7071e00:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,272 DEBUG [RS:1;2467a7071e00:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2467a7071e00:0, corePoolSize=2, maxPoolSize=2 2024-11-10T13:52:41,272 DEBUG [RS:1;2467a7071e00:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,272 DEBUG [RS:1;2467a7071e00:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,272 DEBUG [RS:1;2467a7071e00:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,272 DEBUG [RS:1;2467a7071e00:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,272 DEBUG [RS:1;2467a7071e00:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,273 DEBUG [RS:1;2467a7071e00:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2467a7071e00:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:52:41,273 DEBUG [RS:1;2467a7071e00:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2467a7071e00:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:52:41,273 DEBUG [RS:1;2467a7071e00:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:52:41,273 INFO [RS:2;2467a7071e00:38731 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,273 INFO [RS:2;2467a7071e00:38731 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,273 INFO [RS:2;2467a7071e00:38731 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,273 INFO [RS:2;2467a7071e00:38731 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-10T13:52:41,273 INFO [RS:2;2467a7071e00:38731 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,273 INFO [RS:1;2467a7071e00:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,273 INFO [RS:2;2467a7071e00:38731 {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,38731,1731246760726-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T13:52:41,273 INFO [RS:1;2467a7071e00:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,273 INFO [RS:1;2467a7071e00:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,273 INFO [RS:1;2467a7071e00:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,273 INFO [RS:1;2467a7071e00:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,273 INFO [RS:1;2467a7071e00:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,33817,1731246760683-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T13:52:41,284 INFO [RS:0;2467a7071e00:43963 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T13:52:41,284 INFO [RS:0;2467a7071e00:43963 {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,43963,1731246760644-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,284 INFO [RS:0;2467a7071e00:43963 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,285 INFO [RS:0;2467a7071e00:43963 {}] regionserver.Replication(171): 2467a7071e00,43963,1731246760644 started 2024-11-10T13:52:41,295 INFO [RS:1;2467a7071e00:33817 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T13:52:41,295 INFO [RS:2;2467a7071e00:38731 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T13:52:41,295 INFO [RS:2;2467a7071e00:38731 {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,38731,1731246760726-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,295 INFO [RS:1;2467a7071e00:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,33817,1731246760683-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,295 INFO [RS:1;2467a7071e00:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,295 INFO [RS:2;2467a7071e00:38731 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,295 INFO [RS:1;2467a7071e00:33817 {}] regionserver.Replication(171): 2467a7071e00,33817,1731246760683 started 2024-11-10T13:52:41,295 INFO [RS:2;2467a7071e00:38731 {}] regionserver.Replication(171): 2467a7071e00,38731,1731246760726 started 2024-11-10T13:52:41,298 INFO [RS:0;2467a7071e00:43963 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T13:52:41,298 INFO [RS:0;2467a7071e00:43963 {}] regionserver.HRegionServer(1482): Serving as 2467a7071e00,43963,1731246760644, RpcServer on 2467a7071e00/172.17.0.3:43963, sessionid=0x10125051d920001 2024-11-10T13:52:41,298 DEBUG [RS:0;2467a7071e00:43963 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T13:52:41,298 DEBUG [RS:0;2467a7071e00:43963 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2467a7071e00,43963,1731246760644 2024-11-10T13:52:41,298 DEBUG [RS:0;2467a7071e00:43963 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2467a7071e00,43963,1731246760644' 2024-11-10T13:52:41,298 DEBUG [RS:0;2467a7071e00:43963 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T13:52:41,299 DEBUG [RS:0;2467a7071e00:43963 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T13:52:41,299 DEBUG [RS:0;2467a7071e00:43963 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T13:52:41,299 DEBUG [RS:0;2467a7071e00:43963 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T13:52:41,299 DEBUG [RS:0;2467a7071e00:43963 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2467a7071e00,43963,1731246760644 2024-11-10T13:52:41,299 DEBUG [RS:0;2467a7071e00:43963 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2467a7071e00,43963,1731246760644' 2024-11-10T13:52:41,299 DEBUG [RS:0;2467a7071e00:43963 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T13:52:41,300 DEBUG [RS:0;2467a7071e00:43963 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T13:52:41,300 DEBUG [RS:0;2467a7071e00:43963 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T13:52:41,300 INFO [RS:0;2467a7071e00:43963 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T13:52:41,300 INFO [RS:0;2467a7071e00:43963 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T13:52:41,309 INFO [RS:1;2467a7071e00:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,309 INFO [RS:2;2467a7071e00:38731 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T13:52:41,309 INFO [RS:1;2467a7071e00:33817 {}] regionserver.HRegionServer(1482): Serving as 2467a7071e00,33817,1731246760683, RpcServer on 2467a7071e00/172.17.0.3:33817, sessionid=0x10125051d920002 2024-11-10T13:52:41,309 INFO [RS:2;2467a7071e00:38731 {}] regionserver.HRegionServer(1482): Serving as 2467a7071e00,38731,1731246760726, RpcServer on 2467a7071e00/172.17.0.3:38731, sessionid=0x10125051d920003 2024-11-10T13:52:41,309 DEBUG [RS:2;2467a7071e00:38731 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T13:52:41,309 DEBUG [RS:1;2467a7071e00:33817 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T13:52:41,309 DEBUG [RS:2;2467a7071e00:38731 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2467a7071e00,38731,1731246760726 2024-11-10T13:52:41,309 DEBUG [RS:1;2467a7071e00:33817 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2467a7071e00,33817,1731246760683 2024-11-10T13:52:41,309 DEBUG [RS:2;2467a7071e00:38731 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2467a7071e00,38731,1731246760726' 2024-11-10T13:52:41,309 DEBUG [RS:1;2467a7071e00:33817 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2467a7071e00,33817,1731246760683' 2024-11-10T13:52:41,309 DEBUG [RS:2;2467a7071e00:38731 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T13:52:41,309 DEBUG [RS:1;2467a7071e00:33817 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T13:52:41,310 DEBUG [RS:2;2467a7071e00:38731 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T13:52:41,310 DEBUG [RS:1;2467a7071e00:33817 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T13:52:41,311 DEBUG [RS:2;2467a7071e00:38731 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T13:52:41,311 DEBUG [RS:2;2467a7071e00:38731 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T13:52:41,311 DEBUG [RS:1;2467a7071e00:33817 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T13:52:41,311 DEBUG [RS:2;2467a7071e00:38731 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2467a7071e00,38731,1731246760726 2024-11-10T13:52:41,311 DEBUG [RS:1;2467a7071e00:33817 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T13:52:41,311 DEBUG [RS:2;2467a7071e00:38731 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2467a7071e00,38731,1731246760726' 2024-11-10T13:52:41,311 DEBUG [RS:1;2467a7071e00:33817 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2467a7071e00,33817,1731246760683 2024-11-10T13:52:41,311 DEBUG [RS:2;2467a7071e00:38731 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T13:52:41,311 DEBUG [RS:1;2467a7071e00:33817 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2467a7071e00,33817,1731246760683' 2024-11-10T13:52:41,311 DEBUG [RS:1;2467a7071e00:33817 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T13:52:41,311 DEBUG [RS:2;2467a7071e00:38731 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T13:52:41,311 DEBUG [RS:1;2467a7071e00:33817 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T13:52:41,312 DEBUG [RS:1;2467a7071e00:33817 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T13:52:41,312 INFO [RS:1;2467a7071e00:33817 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T13:52:41,312 DEBUG [RS:2;2467a7071e00:38731 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T13:52:41,312 INFO [RS:1;2467a7071e00:33817 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T13:52:41,312 INFO [RS:2;2467a7071e00:38731 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T13:52:41,312 INFO [RS:2;2467a7071e00:38731 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T13:52:41,320 WARN [2467a7071e00:40991 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-10T13:52:41,407 INFO [RS:0;2467a7071e00:43963 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2467a7071e00%2C43963%2C1731246760644, suffix=, logDir=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/WALs/2467a7071e00,43963,1731246760644, archiveDir=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/oldWALs, maxLogs=32 2024-11-10T13:52:41,412 INFO [RS:0;2467a7071e00:43963 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2467a7071e00%2C43963%2C1731246760644.1731246761412 2024-11-10T13:52:41,414 INFO [RS:1;2467a7071e00:33817 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2467a7071e00%2C33817%2C1731246760683, suffix=, logDir=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/WALs/2467a7071e00,33817,1731246760683, archiveDir=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/oldWALs, maxLogs=32 2024-11-10T13:52:41,414 INFO [RS:2;2467a7071e00:38731 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2467a7071e00%2C38731%2C1731246760726, suffix=, logDir=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/WALs/2467a7071e00,38731,1731246760726, archiveDir=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/oldWALs, maxLogs=32 2024-11-10T13:52:41,417 INFO [RS:1;2467a7071e00:33817 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2467a7071e00%2C33817%2C1731246760683.1731246761417 2024-11-10T13:52:41,417 INFO [RS:2;2467a7071e00:38731 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2467a7071e00%2C38731%2C1731246760726.1731246761417 2024-11-10T13:52:41,421 INFO [RS:0;2467a7071e00:43963 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/WALs/2467a7071e00,43963,1731246760644/2467a7071e00%2C43963%2C1731246760644.1731246761412 2024-11-10T13:52:41,424 DEBUG [RS:0;2467a7071e00:43963 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:34561:34561),(127.0.0.1/127.0.0.1:43871:43871),(127.0.0.1/127.0.0.1:37481:37481)] 2024-11-10T13:52:41,428 INFO [RS:2;2467a7071e00:38731 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/WALs/2467a7071e00,38731,1731246760726/2467a7071e00%2C38731%2C1731246760726.1731246761417 2024-11-10T13:52:41,428 INFO [RS:1;2467a7071e00:33817 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/WALs/2467a7071e00,33817,1731246760683/2467a7071e00%2C33817%2C1731246760683.1731246761417 2024-11-10T13:52:41,429 DEBUG [RS:1;2467a7071e00:33817 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37481:37481),(127.0.0.1/127.0.0.1:34561:34561),(127.0.0.1/127.0.0.1:43871:43871)] 2024-11-10T13:52:41,429 DEBUG [RS:2;2467a7071e00:38731 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43871:43871),(127.0.0.1/127.0.0.1:34561:34561),(127.0.0.1/127.0.0.1:37481:37481)] 2024-11-10T13:52:41,571 DEBUG [2467a7071e00:40991 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-10T13:52:41,571 DEBUG [2467a7071e00:40991 {}] balancer.BalancerClusterState(204): Hosts are {2467a7071e00=0} racks are {/default-rack=0} 2024-11-10T13:52:41,576 DEBUG [2467a7071e00:40991 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-10T13:52:41,576 DEBUG [2467a7071e00:40991 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-10T13:52:41,576 DEBUG [2467a7071e00:40991 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-10T13:52:41,576 DEBUG [2467a7071e00:40991 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-10T13:52:41,576 DEBUG [2467a7071e00:40991 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-10T13:52:41,576 DEBUG [2467a7071e00:40991 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-10T13:52:41,576 INFO [2467a7071e00:40991 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-10T13:52:41,576 INFO [2467a7071e00:40991 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-10T13:52:41,577 INFO [2467a7071e00:40991 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-10T13:52:41,577 DEBUG [2467a7071e00:40991 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-10T13:52:41,577 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2467a7071e00,38731,1731246760726 2024-11-10T13:52:41,580 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2467a7071e00,38731,1731246760726, state=OPENING 2024-11-10T13:52:41,658 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-10T13:52:41,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:41,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:41,671 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:41,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:41,673 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:52:41,673 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T13:52:41,673 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:52:41,673 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:52:41,673 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2467a7071e00,38731,1731246760726}] 2024-11-10T13:52:41,674 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:52:41,829 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T13:52:41,832 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33487, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T13:52:41,834 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-10T13:52:41,835 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-10T13:52:41,841 INFO [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T13:52:41,841 INFO [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:52:41,844 INFO [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2467a7071e00%2C38731%2C1731246760726.meta, suffix=.meta, logDir=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/WALs/2467a7071e00,38731,1731246760726, archiveDir=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/oldWALs, maxLogs=32 2024-11-10T13:52:41,845 INFO [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 2467a7071e00%2C38731%2C1731246760726.meta.1731246761845.meta 
2024-11-10T13:52:41,854 INFO [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/WALs/2467a7071e00,38731,1731246760726/2467a7071e00%2C38731%2C1731246760726.meta.1731246761845.meta 2024-11-10T13:52:41,856 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34561:34561),(127.0.0.1/127.0.0.1:43871:43871),(127.0.0.1/127.0.0.1:37481:37481)] 2024-11-10T13:52:41,857 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:52:41,858 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T13:52:41,858 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T13:52:41,858 INFO [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-10T13:52:41,858 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T13:52:41,858 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:52:41,858 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T13:52:41,858 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T13:52:41,860 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T13:52:41,861 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T13:52:41,861 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:41,862 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:52:41,862 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T13:52:41,863 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T13:52:41,863 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:41,863 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:52:41,864 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T13:52:41,864 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T13:52:41,864 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:41,865 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, 
compression=NONE 2024-11-10T13:52:41,865 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T13:52:41,866 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T13:52:41,866 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:41,866 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:52:41,867 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T13:52:41,868 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740 2024-11-10T13:52:41,869 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740 2024-11-10T13:52:41,870 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T13:52:41,870 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T13:52:41,871 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-10T13:52:41,873 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T13:52:41,874 INFO [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72190894, jitterRate=0.0757281482219696}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T13:52:41,874 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T13:52:41,875 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731246761858Writing region info on filesystem at 1731246761858Initializing all the Stores at 1731246761860 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731246761860Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731246761860Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731246761860Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731246761860Cleaning up temporary data from old regions at 1731246761870 (+10 ms)Running coprocessor post-open hooks at 1731246761874 (+4 ms)Region opened successfully at 1731246761874 2024-11-10T13:52:41,876 INFO [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731246761828 2024-11-10T13:52:41,880 DEBUG [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T13:52:41,880 INFO [RS_OPEN_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T13:52:41,881 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=2467a7071e00,38731,1731246760726 2024-11-10T13:52:41,883 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2467a7071e00,38731,1731246760726, state=OPEN 2024-11-10T13:52:41,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T13:52:41,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T13:52:41,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T13:52:41,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T13:52:41,913 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2467a7071e00,38731,1731246760726 2024-11-10T13:52:41,913 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:52:41,913 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:52:41,913 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:52:41,913 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:52:41,918 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T13:52:41,918 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2467a7071e00,38731,1731246760726 in 240 msec 2024-11-10T13:52:41,922 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T13:52:41,922 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 751 msec 2024-11-10T13:52:41,924 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:52:41,924 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T13:52:41,926 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T13:52:41,926 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=2467a7071e00,38731,1731246760726, seqNum=-1] 2024-11-10T13:52:41,926 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T13:52:41,928 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33459, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T13:52:41,937 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 828 msec 2024-11-10T13:52:41,937 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731246761937, completionTime=-1 2024-11-10T13:52:41,937 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-10T13:52:41,937 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-10T13:52:41,939 INFO [master/2467a7071e00:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-10T13:52:41,939 INFO [master/2467a7071e00:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731246821939 2024-11-10T13:52:41,940 INFO [master/2467a7071e00:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731246881940 2024-11-10T13:52:41,940 INFO [master/2467a7071e00:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-10T13:52:41,940 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,40991,1731246760446-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,940 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,40991,1731246760446-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,940 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,40991,1731246760446-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,940 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2467a7071e00:40991, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,940 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T13:52:41,941 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-11-10T13:52:41,943 DEBUG [master/2467a7071e00:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T13:52:41,946 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.149sec 2024-11-10T13:52:41,946 INFO [master/2467a7071e00:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T13:52:41,946 INFO [master/2467a7071e00:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T13:52:41,946 INFO [master/2467a7071e00:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T13:52:41,946 INFO [master/2467a7071e00:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-10T13:52:41,946 INFO [master/2467a7071e00:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T13:52:41,946 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,40991,1731246760446-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T13:52:41,946 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,40991,1731246760446-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T13:52:41,949 DEBUG [master/2467a7071e00:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T13:52:41,949 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T13:52:41,949 INFO [master/2467a7071e00:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2467a7071e00,40991,1731246760446-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
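At this point the master reports "completed initialization" and all three region servers have checked in. A log like this comes from a JUnit minicluster; the sketch below shows the start/stop skeleton that would produce it, assuming HBaseTestingUtil.startMiniCluster(int) behaves as its HBaseTestingUtility predecessor did (shutdownMiniCluster is confirmed by the tearDown stack traces later in this log). The class and main method are illustrative, and the HDFS erasure-coding setup implied by the test name is omitted:

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    // Illustrative skeleton of the minicluster lifecycle recorded in this log.
    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster(3);   // three region servers, matching "expected min=3 server(s)"
        try {
          // ... create TestHBaseWalOnEC, write, flush (see the sketches further below) ...
        } finally {
          util.shutdownMiniCluster(); // produces the STOPPING entries at the end of this log
        }
      }
    }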
2024-11-10T13:52:41,954 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23e33ccd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:52:41,954 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2467a7071e00,40991,-1 for getting cluster id 2024-11-10T13:52:41,954 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T13:52:41,955 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '091205a3-36eb-4187-afd9-d6adf4851a4d' 2024-11-10T13:52:41,956 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T13:52:41,956 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "091205a3-36eb-4187-afd9-d6adf4851a4d" 2024-11-10T13:52:41,956 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4540ee89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:52:41,956 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2467a7071e00,40991,-1] 2024-11-10T13:52:41,957 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T13:52:41,957 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:52:41,958 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55064, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-10T13:52:41,959 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@413f20d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:52:41,960 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T13:52:41,961 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2467a7071e00,38731,1731246760726, seqNum=-1] 2024-11-10T13:52:41,962 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T13:52:41,963 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55726, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T13:52:41,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2467a7071e00,40991,1731246760446 2024-11-10T13:52:41,967 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-10T13:52:41,968 DEBUG 
[RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 2467a7071e00,40991,1731246760446 2024-11-10T13:52:41,968 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@31bfa03a 2024-11-10T13:52:41,968 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-10T13:52:41,970 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55080, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-10T13:52:41,972 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40991 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T13:52:41,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40991 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-10T13:52:41,977 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-10T13:52:41,977 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:41,977 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40991 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-10T13:52:41,979 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-10T13:52:41,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40991 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T13:52:41,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741837_1013 (size=392) 2024-11-10T13:52:41,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741837_1013 (size=392) 2024-11-10T13:52:41,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741837_1013 (size=392) 2024-11-10T13:52:42,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40991 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T13:52:42,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40991 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T13:52:42,394 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): 
creating {ENCODED => ad324c097dab92f7a265da8a0675db5a, NAME => 'TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1 2024-11-10T13:52:42,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741838_1014 (size=51) 2024-11-10T13:52:42,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741838_1014 (size=51) 2024-11-10T13:52:42,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741838_1014 (size=51) 2024-11-10T13:52:42,409 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:52:42,409 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing ad324c097dab92f7a265da8a0675db5a, disabling compactions & flushes 2024-11-10T13:52:42,409 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a. 2024-11-10T13:52:42,409 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a. 2024-11-10T13:52:42,409 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a. after waiting 0 ms 2024-11-10T13:52:42,409 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a. 2024-11-10T13:52:42,409 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a. 
2024-11-10T13:52:42,409 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for ad324c097dab92f7a265da8a0675db5a: Waiting for close lock at 1731246762409Disabling compacts and flushes for region at 1731246762409Disabling writes for close at 1731246762409Writing region close event to WAL at 1731246762409Closed at 1731246762409 2024-11-10T13:52:42,412 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-10T13:52:42,412 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731246762412"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731246762412"}]},"ts":"1731246762412"} 2024-11-10T13:52:42,416 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-10T13:52:42,419 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-10T13:52:42,419 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731246762419"}]},"ts":"1731246762419"} 2024-11-10T13:52:42,422 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-10T13:52:42,422 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {2467a7071e00=0} racks are {/default-rack=0} 2024-11-10T13:52:42,423 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-10T13:52:42,423 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-10T13:52:42,424 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-10T13:52:42,424 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-10T13:52:42,424 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-10T13:52:42,424 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-10T13:52:42,424 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-10T13:52:42,424 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-10T13:52:42,424 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-10T13:52:42,424 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-10T13:52:42,424 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=ad324c097dab92f7a265da8a0675db5a, ASSIGN}] 2024-11-10T13:52:42,426 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=ad324c097dab92f7a265da8a0675db5a, ASSIGN 2024-11-10T13:52:42,428 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=ad324c097dab92f7a265da8a0675db5a, ASSIGN; state=OFFLINE, location=2467a7071e00,33817,1731246760683; forceNewPlan=false, retain=false 2024-11-10T13:52:42,578 INFO [2467a7071e00:40991 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-10T13:52:42,579 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ad324c097dab92f7a265da8a0675db5a, regionState=OPENING, regionLocation=2467a7071e00,33817,1731246760683 2024-11-10T13:52:42,583 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=ad324c097dab92f7a265da8a0675db5a, ASSIGN because future has completed 2024-11-10T13:52:42,584 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ad324c097dab92f7a265da8a0675db5a, server=2467a7071e00,33817,1731246760683}] 2024-11-10T13:52:42,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40991 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T13:52:42,738 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T13:52:42,743 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35591, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T13:52:42,751 INFO [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a. 
2024-11-10T13:52:42,751 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ad324c097dab92f7a265da8a0675db5a, NAME => 'TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a.', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:52:42,752 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC ad324c097dab92f7a265da8a0675db5a 2024-11-10T13:52:42,752 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:52:42,752 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ad324c097dab92f7a265da8a0675db5a 2024-11-10T13:52:42,752 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ad324c097dab92f7a265da8a0675db5a 2024-11-10T13:52:42,754 INFO [StoreOpener-ad324c097dab92f7a265da8a0675db5a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ad324c097dab92f7a265da8a0675db5a 2024-11-10T13:52:42,756 INFO [StoreOpener-ad324c097dab92f7a265da8a0675db5a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ad324c097dab92f7a265da8a0675db5a columnFamilyName cf 2024-11-10T13:52:42,757 DEBUG [StoreOpener-ad324c097dab92f7a265da8a0675db5a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:52:42,757 INFO [StoreOpener-ad324c097dab92f7a265da8a0675db5a-1 {}] regionserver.HStore(327): Store=ad324c097dab92f7a265da8a0675db5a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:52:42,758 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ad324c097dab92f7a265da8a0675db5a 2024-11-10T13:52:42,759 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/default/TestHBaseWalOnEC/ad324c097dab92f7a265da8a0675db5a 2024-11-10T13:52:42,759 DEBUG 
[RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/default/TestHBaseWalOnEC/ad324c097dab92f7a265da8a0675db5a 2024-11-10T13:52:42,760 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ad324c097dab92f7a265da8a0675db5a 2024-11-10T13:52:42,760 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ad324c097dab92f7a265da8a0675db5a 2024-11-10T13:52:42,762 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ad324c097dab92f7a265da8a0675db5a 2024-11-10T13:52:42,765 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/default/TestHBaseWalOnEC/ad324c097dab92f7a265da8a0675db5a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:52:42,765 INFO [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ad324c097dab92f7a265da8a0675db5a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68217240, jitterRate=0.01651608943939209}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T13:52:42,765 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ad324c097dab92f7a265da8a0675db5a 2024-11-10T13:52:42,766 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ad324c097dab92f7a265da8a0675db5a: Running coprocessor pre-open hook at 1731246762752Writing region info on filesystem at 1731246762752Initializing all the Stores at 1731246762754 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731246762754Cleaning up temporary data from old regions at 1731246762760 (+6 ms)Running coprocessor post-open hooks at 1731246762765 (+5 ms)Region opened successfully at 1731246762766 (+1 ms) 2024-11-10T13:52:42,768 INFO [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a., pid=6, masterSystemTime=1731246762738 2024-11-10T13:52:42,771 DEBUG [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a. 2024-11-10T13:52:42,771 INFO [RS_OPEN_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a. 
2024-11-10T13:52:42,772 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ad324c097dab92f7a265da8a0675db5a, regionState=OPEN, openSeqNum=2, regionLocation=2467a7071e00,33817,1731246760683 2024-11-10T13:52:42,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ad324c097dab92f7a265da8a0675db5a, server=2467a7071e00,33817,1731246760683 because future has completed 2024-11-10T13:52:42,782 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40991 {}] assignment.AssignmentManager(1535): Unable to acquire lock for regionNode state=OPEN, location=2467a7071e00,33817,1731246760683, table=TestHBaseWalOnEC, region=ad324c097dab92f7a265da8a0675db5a. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-10T13:52:42,787 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-10T13:52:42,788 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ad324c097dab92f7a265da8a0675db5a, server=2467a7071e00,33817,1731246760683 in 200 msec 2024-11-10T13:52:42,792 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-10T13:52:42,792 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=ad324c097dab92f7a265da8a0675db5a, ASSIGN in 363 msec 2024-11-10T13:52:42,793 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-10T13:52:42,793 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731246762793"}]},"ts":"1731246762793"} 2024-11-10T13:52:42,796 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-10T13:52:42,797 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-10T13:52:42,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 825 msec 2024-11-10T13:52:43,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40991 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T13:52:43,120 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-10T13:52:43,120 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-10T13:52:43,121 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-10T13:52:43,125 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 
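The CREATE operation for default:TestHBaseWalOnEC is now complete (pid=4, 825 msec) and its single region ad324c097dab92f7a265da8a0675db5a is OPEN on 2467a7071e00,33817. A hedged sketch of the client-side call that drives this sequence, using the standard Admin and TableDescriptorBuilder API with the table and column family names taken from the log; attributes such as REGION_REPLICATION are left at their defaults, and the waitUntilAllRegionsAssigned helper mentioned in the comment is an assumption:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Illustrative: the client call behind CreateTableProcedure pid=4 in this log.
    public class CreateTableSketch {
      static void createTable(Connection conn) throws Exception {
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .build();
        try (Admin admin = conn.getAdmin()) {
          admin.createTable(desc);
          // The "Waiting until all regions ... get assigned" entries correspond to a
          // test-utility wait such as util.waitUntilAllRegionsAssigned(name) (assumed).
        }
      }
    }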
2024-11-10T13:52:43,125 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-10T13:52:43,125 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-11-10T13:52:43,131 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a., hostname=2467a7071e00,33817,1731246760683, seqNum=2] 2024-11-10T13:52:43,132 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T13:52:43,134 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40376, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T13:52:43,138 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40991 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestHBaseWalOnEC 2024-11-10T13:52:43,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40991 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-10T13:52:43,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40991 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T13:52:43,142 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-10T13:52:43,144 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-10T13:52:43,144 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-10T13:52:43,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40991 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T13:52:43,300 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33817 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-10T13:52:43,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a. 
2024-11-10T13:52:43,302 INFO [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing ad324c097dab92f7a265da8a0675db5a 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-10T13:52:43,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/default/TestHBaseWalOnEC/ad324c097dab92f7a265da8a0675db5a/.tmp/cf/5fffe94dfbec4ac2b39c7aa1255cd83c is 36, key is row/cf:cq/1731246763135/Put/seqid=0 2024-11-10T13:52:43,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741839_1015 (size=4787) 2024-11-10T13:52:43,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741839_1015 (size=4787) 2024-11-10T13:52:43,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741839_1015 (size=4787) 2024-11-10T13:52:43,330 INFO [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/default/TestHBaseWalOnEC/ad324c097dab92f7a265da8a0675db5a/.tmp/cf/5fffe94dfbec4ac2b39c7aa1255cd83c 2024-11-10T13:52:43,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/default/TestHBaseWalOnEC/ad324c097dab92f7a265da8a0675db5a/.tmp/cf/5fffe94dfbec4ac2b39c7aa1255cd83c as hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/default/TestHBaseWalOnEC/ad324c097dab92f7a265da8a0675db5a/cf/5fffe94dfbec4ac2b39c7aa1255cd83c 2024-11-10T13:52:43,346 INFO [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/default/TestHBaseWalOnEC/ad324c097dab92f7a265da8a0675db5a/cf/5fffe94dfbec4ac2b39c7aa1255cd83c, entries=1, sequenceid=5, filesize=4.7 K 2024-11-10T13:52:43,347 INFO [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for ad324c097dab92f7a265da8a0675db5a in 46ms, sequenceid=5, compaction requested=false 2024-11-10T13:52:43,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for ad324c097dab92f7a265da8a0675db5a: 2024-11-10T13:52:43,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a. 
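The flush just recorded persisted a single 32-byte cell with key row/cf:cq into a 4.7 K HFile (5fffe94dfbec4ac2b39c7aa1255cd83c) at sequenceid=5. A sketch of the matching client-side write and flush, assuming the usual Table/Admin calls; the row, family, and qualifier come from the HFileWriterImpl key above, while the value bytes are a placeholder since the log only reports their size:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative: the write + flush behind FlushTableProcedure pid=7 / pid=8 above.
    public class WriteAndFlushSketch {
      static void writeAndFlush(Connection conn) throws Exception {
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Table table = conn.getTable(name); Admin admin = conn.getAdmin()) {
          Put put = new Put(Bytes.toBytes("row"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")); // placeholder value
          table.put(put);      // memstore write ("dataSize=32 B" in the log)
          admin.flush(name);   // triggers the FlushRegionCallable / DefaultStoreFlusher entries
        }
      }
    }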
2024-11-10T13:52:43,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2467a7071e00:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-10T13:52:43,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40991 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-10T13:52:43,353 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-10T13:52:43,353 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 206 msec 2024-11-10T13:52:43,356 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 215 msec 2024-11-10T13:52:43,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40991 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T13:52:43,461 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-10T13:52:43,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T13:52:43,470 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-10T13:52:43,470 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:52:43,471 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:52:43,471 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:52:43,471 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-10T13:52:43,471 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-10T13:52:43,472 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1946831239, stopped=false 2024-11-10T13:52:43,472 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2467a7071e00,40991,1731246760446 2024-11-10T13:52:43,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:52:43,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:52:43,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:52:43,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:52:43,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:43,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40991-0x10125051d920000, 
quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:43,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:43,598 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T13:52:43,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:43,599 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:52:43,599 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:52:43,599 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:52:43,600 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:52:43,600 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-10T13:52:43,600 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:52:43,601 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:52:43,601 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2467a7071e00,43963,1731246760644' ***** 2024-11-10T13:52:43,601 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T13:52:43,601 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2467a7071e00,33817,1731246760683' ***** 2024-11-10T13:52:43,602 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T13:52:43,602 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2467a7071e00,38731,1731246760726' ***** 2024-11-10T13:52:43,602 INFO [RS:0;2467a7071e00:43963 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T13:52:43,602 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T13:52:43,602 INFO [RS:0;2467a7071e00:43963 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T13:52:43,602 INFO [RS:1;2467a7071e00:33817 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T13:52:43,602 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T13:52:43,602 INFO [RS:0;2467a7071e00:43963 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-10T13:52:43,602 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-10T13:52:43,602 INFO [RS:2;2467a7071e00:38731 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-10T13:52:43,602 INFO [RS:0;2467a7071e00:43963 {}] regionserver.HRegionServer(959): stopping server 2467a7071e00,43963,1731246760644
2024-11-10T13:52:43,602 INFO [RS:1;2467a7071e00:33817 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-10T13:52:43,603 INFO [RS:1;2467a7071e00:33817 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-10T13:52:43,603 INFO [RS:2;2467a7071e00:38731 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-10T13:52:43,603 INFO [RS:0;2467a7071e00:43963 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-10T13:52:43,603 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-10T13:52:43,603 INFO [RS:2;2467a7071e00:38731 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-10T13:52:43,603 INFO [RS:1;2467a7071e00:33817 {}] regionserver.HRegionServer(3091): Received CLOSE for ad324c097dab92f7a265da8a0675db5a
2024-11-10T13:52:43,603 INFO [RS:0;2467a7071e00:43963 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2467a7071e00:43963.
2024-11-10T13:52:43,603 INFO [RS:2;2467a7071e00:38731 {}] regionserver.HRegionServer(959): stopping server 2467a7071e00,38731,1731246760726
2024-11-10T13:52:43,603 INFO [RS:2;2467a7071e00:38731 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-10T13:52:43,603 DEBUG [RS:0;2467a7071e00:43963 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-10T13:52:43,603 DEBUG [RS:0;2467a7071e00:43963 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-10T13:52:43,603 INFO [RS:1;2467a7071e00:33817 {}] regionserver.HRegionServer(959): stopping server 2467a7071e00,33817,1731246760683
2024-11-10T13:52:43,603 INFO [RS:2;2467a7071e00:38731 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;2467a7071e00:38731.
2024-11-10T13:52:43,603 INFO [RS:1;2467a7071e00:33817 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-10T13:52:43,603 INFO [RS:0;2467a7071e00:43963 {}] regionserver.HRegionServer(976): stopping server 2467a7071e00,43963,1731246760644; all regions closed.
2024-11-10T13:52:43,603 DEBUG [RS:2;2467a7071e00:38731 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-10T13:52:43,604 INFO [RS:1;2467a7071e00:33817 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;2467a7071e00:33817.
2024-11-10T13:52:43,603 DEBUG [RS_CLOSE_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ad324c097dab92f7a265da8a0675db5a, disabling compactions & flushes
2024-11-10T13:52:43,604 DEBUG [RS:2;2467a7071e00:38731 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-10T13:52:43,604 INFO [RS_CLOSE_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a.
2024-11-10T13:52:43,604 DEBUG [RS:1;2467a7071e00:33817 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-10T13:52:43,604 INFO [RS:2;2467a7071e00:38731 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-10T13:52:43,604 DEBUG [RS_CLOSE_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a.
2024-11-10T13:52:43,604 DEBUG [RS:1;2467a7071e00:33817 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-10T13:52:43,604 INFO [RS:2;2467a7071e00:38731 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-10T13:52:43,604 DEBUG [RS_CLOSE_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a. after waiting 0 ms
2024-11-10T13:52:43,604 INFO [RS:2;2467a7071e00:38731 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-10T13:52:43,604 INFO [RS:1;2467a7071e00:33817 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close
2024-11-10T13:52:43,604 DEBUG [RS_CLOSE_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a.
2024-11-10T13:52:43,604 INFO [RS:2;2467a7071e00:38731 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-10T13:52:43,604 DEBUG [RS:1;2467a7071e00:33817 {}] regionserver.HRegionServer(1325): Online Regions={ad324c097dab92f7a265da8a0675db5a=TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a.} 2024-11-10T13:52:43,604 DEBUG [RS:1;2467a7071e00:33817 {}] regionserver.HRegionServer(1351): Waiting on ad324c097dab92f7a265da8a0675db5a 2024-11-10T13:52:43,605 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:43,605 INFO [RS:2;2467a7071e00:38731 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-10T13:52:43,605 DEBUG [RS:2;2467a7071e00:38731 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-10T13:52:43,605 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:43,605 DEBUG [RS:2;2467a7071e00:38731 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-10T13:52:43,605 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T13:52:43,605 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:43,605 INFO [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T13:52:43,605 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:43,605 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T13:52:43,605 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T13:52:43,605 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:43,605 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T13:52:43,605 INFO [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-10T13:52:43,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741833_1009 (size=93) 2024-11-10T13:52:43,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741833_1009 (size=93) 2024-11-10T13:52:43,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741833_1009 (size=93) 2024-11-10T13:52:43,612 DEBUG [RS:0;2467a7071e00:43963 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/oldWALs 2024-11-10T13:52:43,612 INFO [RS:0;2467a7071e00:43963 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2467a7071e00%2C43963%2C1731246760644:(num 1731246761412) 2024-11-10T13:52:43,612 DEBUG [RS:0;2467a7071e00:43963 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:52:43,612 INFO [RS:0;2467a7071e00:43963 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:52:43,612 DEBUG [RS_CLOSE_REGION-regionserver/2467a7071e00:0-0 
{event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/default/TestHBaseWalOnEC/ad324c097dab92f7a265da8a0675db5a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-10T13:52:43,612 INFO [RS:0;2467a7071e00:43963 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:52:43,613 INFO [RS:0;2467a7071e00:43963 {}] hbase.ChoreService(370): Chore service for: regionserver/2467a7071e00:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T13:52:43,613 INFO [RS:0;2467a7071e00:43963 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T13:52:43,613 INFO [regionserver/2467a7071e00:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T13:52:43,613 INFO [RS:0;2467a7071e00:43963 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T13:52:43,613 INFO [RS:0;2467a7071e00:43963 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T13:52:43,613 INFO [RS:0;2467a7071e00:43963 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:52:43,613 INFO [RS:0;2467a7071e00:43963 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43963 2024-11-10T13:52:43,614 INFO [RS_CLOSE_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a. 2024-11-10T13:52:43,614 DEBUG [RS_CLOSE_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ad324c097dab92f7a265da8a0675db5a: Waiting for close lock at 1731246763603Running coprocessor pre-close hooks at 1731246763603Disabling compacts and flushes for region at 1731246763603Disabling writes for close at 1731246763604 (+1 ms)Writing region close event to WAL at 1731246763607 (+3 ms)Running coprocessor post-close hooks at 1731246763613 (+6 ms)Closed at 1731246763613 2024-11-10T13:52:43,614 DEBUG [RS_CLOSE_REGION-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a. 
2024-11-10T13:52:43,624 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740/.tmp/info/53d39e24b1284da994b9ff43aed4d119 is 153, key is TestHBaseWalOnEC,,1731246761971.ad324c097dab92f7a265da8a0675db5a./info:regioninfo/1731246762772/Put/seqid=0 2024-11-10T13:52:43,626 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:52:43,626 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2467a7071e00,43963,1731246760644 2024-11-10T13:52:43,626 INFO [RS:0;2467a7071e00:43963 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:52:43,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741840_1016 (size=6637) 2024-11-10T13:52:43,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741840_1016 (size=6637) 2024-11-10T13:52:43,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741840_1016 (size=6637) 2024-11-10T13:52:43,631 INFO [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740/.tmp/info/53d39e24b1284da994b9ff43aed4d119 2024-11-10T13:52:43,639 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2467a7071e00,43963,1731246760644] 2024-11-10T13:52:43,649 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2467a7071e00,43963,1731246760644 already deleted, retry=false 2024-11-10T13:52:43,650 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2467a7071e00,43963,1731246760644 expired; onlineServers=2 2024-11-10T13:52:43,653 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740/.tmp/ns/10e0d90052994c6e9c50916e4876670a is 43, key is default/ns:d/1731246761928/Put/seqid=0 2024-11-10T13:52:43,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741841_1017 (size=5153) 2024-11-10T13:52:43,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741841_1017 (size=5153) 2024-11-10T13:52:43,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741841_1017 (size=5153) 2024-11-10T13:52:43,660 INFO [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740/.tmp/ns/10e0d90052994c6e9c50916e4876670a 2024-11-10T13:52:43,672 INFO [regionserver/2467a7071e00:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:52:43,678 INFO [regionserver/2467a7071e00:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:52:43,678 INFO [regionserver/2467a7071e00:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:52:43,683 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740/.tmp/table/c7c0447544ce4aef9e8b71b6742897d7 is 52, key is TestHBaseWalOnEC/table:state/1731246762793/Put/seqid=0 2024-11-10T13:52:43,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741842_1018 (size=5249) 2024-11-10T13:52:43,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741842_1018 (size=5249) 2024-11-10T13:52:43,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741842_1018 (size=5249) 2024-11-10T13:52:43,691 INFO [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740/.tmp/table/c7c0447544ce4aef9e8b71b6742897d7 2024-11-10T13:52:43,701 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740/.tmp/info/53d39e24b1284da994b9ff43aed4d119 as hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740/info/53d39e24b1284da994b9ff43aed4d119 2024-11-10T13:52:43,710 INFO [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740/info/53d39e24b1284da994b9ff43aed4d119, entries=10, sequenceid=11, filesize=6.5 K 2024-11-10T13:52:43,711 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740/.tmp/ns/10e0d90052994c6e9c50916e4876670a as hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740/ns/10e0d90052994c6e9c50916e4876670a 2024-11-10T13:52:43,720 INFO [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740/ns/10e0d90052994c6e9c50916e4876670a, entries=2, sequenceid=11, filesize=5.0 K 2024-11-10T13:52:43,721 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740/.tmp/table/c7c0447544ce4aef9e8b71b6742897d7 as hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740/table/c7c0447544ce4aef9e8b71b6742897d7 2024-11-10T13:52:43,730 INFO [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740/table/c7c0447544ce4aef9e8b71b6742897d7, entries=2, sequenceid=11, filesize=5.1 K 2024-11-10T13:52:43,732 INFO [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 126ms, sequenceid=11, compaction requested=false 2024-11-10T13:52:43,738 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-10T13:52:43,739 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T13:52:43,739 INFO [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T13:52:43,739 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731246763605Running coprocessor pre-close hooks at 1731246763605Disabling compacts and flushes for region at 1731246763605Disabling writes for close at 1731246763605Obtaining lock to block concurrent updates at 1731246763605Preparing flush snapshotting stores in 1588230740 at 1731246763605Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731246763606 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731246763607 (+1 ms)Flushing 1588230740/info: creating writer at 1731246763607Flushing 1588230740/info: appending metadata at 1731246763623 (+16 ms)Flushing 1588230740/info: closing flushed file at 1731246763623Flushing 1588230740/ns: creating writer at 1731246763638 (+15 ms)Flushing 1588230740/ns: appending metadata at 1731246763652 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731246763652Flushing 1588230740/table: creating writer at 1731246763667 (+15 ms)Flushing 1588230740/table: appending metadata at 1731246763683 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731246763683Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ecd8b81: reopening flushed file at 1731246763699 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ff6443e: reopening flushed file at 1731246763710 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d8ec637: reopening flushed file at 1731246763720 (+10 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 126ms, sequenceid=11, compaction requested=false at 1731246763732 
(+12 ms)Writing region close event to WAL at 1731246763733 (+1 ms)Running coprocessor post-close hooks at 1731246763738 (+5 ms)Closed at 1731246763739 (+1 ms) 2024-11-10T13:52:43,739 DEBUG [RS_CLOSE_META-regionserver/2467a7071e00:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T13:52:43,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:52:43,739 INFO [RS:0;2467a7071e00:43963 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:52:43,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43963-0x10125051d920001, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:52:43,739 INFO [RS:0;2467a7071e00:43963 {}] regionserver.HRegionServer(1031): Exiting; stopping=2467a7071e00,43963,1731246760644; zookeeper connection closed. 2024-11-10T13:52:43,740 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5b1914b2 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5b1914b2 2024-11-10T13:52:43,804 INFO [RS:1;2467a7071e00:33817 {}] regionserver.HRegionServer(976): stopping server 2467a7071e00,33817,1731246760683; all regions closed. 2024-11-10T13:52:43,805 INFO [RS:2;2467a7071e00:38731 {}] regionserver.HRegionServer(976): stopping server 2467a7071e00,38731,1731246760726; all regions closed. 2024-11-10T13:52:43,805 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:43,805 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:43,805 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:43,805 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:43,806 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:43,806 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:43,806 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:43,806 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:43,806 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:43,806 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:43,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741836_1012 (size=2751) 2024-11-10T13:52:43,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741834_1010 (size=1298) 2024-11-10T13:52:43,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741834_1010 (size=1298) 2024-11-10T13:52:43,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741836_1012 (size=2751) 2024-11-10T13:52:43,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741836_1012 (size=2751) 2024-11-10T13:52:43,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741834_1010 (size=1298) 2024-11-10T13:52:43,812 DEBUG [RS:2;2467a7071e00:38731 {}] 
wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/oldWALs 2024-11-10T13:52:43,812 INFO [RS:2;2467a7071e00:38731 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2467a7071e00%2C38731%2C1731246760726.meta:.meta(num 1731246761845) 2024-11-10T13:52:43,812 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:43,812 DEBUG [RS:1;2467a7071e00:33817 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/oldWALs 2024-11-10T13:52:43,812 INFO [RS:1;2467a7071e00:33817 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2467a7071e00%2C33817%2C1731246760683:(num 1731246761417) 2024-11-10T13:52:43,813 DEBUG [RS:1;2467a7071e00:33817 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:52:43,813 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:43,813 INFO [RS:1;2467a7071e00:33817 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:52:43,813 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:43,813 INFO [RS:1;2467a7071e00:33817 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:52:43,813 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:43,813 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:43,813 INFO [RS:1;2467a7071e00:33817 {}] hbase.ChoreService(370): Chore service for: regionserver/2467a7071e00:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T13:52:43,813 INFO [RS:1;2467a7071e00:33817 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T13:52:43,813 INFO [RS:1;2467a7071e00:33817 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T13:52:43,813 INFO [regionserver/2467a7071e00:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T13:52:43,813 INFO [RS:1;2467a7071e00:33817 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-10T13:52:43,813 INFO [RS:1;2467a7071e00:33817 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:52:43,813 INFO [RS:1;2467a7071e00:33817 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33817 2024-11-10T13:52:43,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741835_1011 (size=93) 2024-11-10T13:52:43,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741835_1011 (size=93) 2024-11-10T13:52:43,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741835_1011 (size=93) 2024-11-10T13:52:43,818 DEBUG [RS:2;2467a7071e00:38731 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/oldWALs 2024-11-10T13:52:43,818 INFO [RS:2;2467a7071e00:38731 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2467a7071e00%2C38731%2C1731246760726:(num 1731246761417) 2024-11-10T13:52:43,819 DEBUG [RS:2;2467a7071e00:38731 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:52:43,819 INFO [RS:2;2467a7071e00:38731 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:52:43,819 INFO [RS:2;2467a7071e00:38731 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:52:43,819 INFO [RS:2;2467a7071e00:38731 {}] hbase.ChoreService(370): Chore service for: regionserver/2467a7071e00:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-10T13:52:43,819 INFO [RS:2;2467a7071e00:38731 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:52:43,819 INFO [regionserver/2467a7071e00:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T13:52:43,819 INFO [RS:2;2467a7071e00:38731 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38731 2024-11-10T13:52:43,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2467a7071e00,33817,1731246760683 2024-11-10T13:52:43,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:52:43,826 INFO [RS:1;2467a7071e00:33817 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:52:43,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2467a7071e00,38731,1731246760726 2024-11-10T13:52:43,837 INFO [RS:2;2467a7071e00:38731 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:52:43,837 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. 
java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$370/0x00007f5fb08f5b80@2c736c42 rejected from java.util.concurrent.ThreadPoolExecutor@6971f7a8[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14]
    at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?]
    at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?]
    at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4]
    at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4]
2024-11-10T13:52:43,847 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2467a7071e00,38731,1731246760726]
2024-11-10T13:52:43,868 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2467a7071e00,38731,1731246760726 already deleted, retry=false
2024-11-10T13:52:43,868 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2467a7071e00,38731,1731246760726 expired; onlineServers=1
2024-11-10T13:52:43,868 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2467a7071e00,33817,1731246760683]
2024-11-10T13:52:43,881 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2467a7071e00,33817,1731246760683 already deleted, retry=false
2024-11-10T13:52:43,881 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2467a7071e00,33817,1731246760683 expired; onlineServers=0
2024-11-10T13:52:43,881 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2467a7071e00,40991,1731246760446' *****
2024-11-10T13:52:43,881 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-10T13:52:43,881 INFO [M:0;2467a7071e00:40991 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-10T13:52:43,882 INFO [M:0;2467a7071e00:40991 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-10T13:52:43,882 DEBUG [M:0;2467a7071e00:40991 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-10T13:52:43,882 DEBUG [M:0;2467a7071e00:40991 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-10T13:52:43,882 DEBUG [master/2467a7071e00:0:becomeActiveMaster-HFileCleaner.large.0-1731246761117 {}] cleaner.HFileCleaner(306): Exit Thread[master/2467a7071e00:0:becomeActiveMaster-HFileCleaner.large.0-1731246761117,5,FailOnTimeoutGroup]
2024-11-10T13:52:43,882 DEBUG [master/2467a7071e00:0:becomeActiveMaster-HFileCleaner.small.0-1731246761117 {}] cleaner.HFileCleaner(306): Exit Thread[master/2467a7071e00:0:becomeActiveMaster-HFileCleaner.small.0-1731246761117,5,FailOnTimeoutGroup]
2024-11-10T13:52:43,882 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-10T13:52:43,882 INFO [M:0;2467a7071e00:40991 {}] hbase.ChoreService(370): Chore service for: master/2467a7071e00:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-10T13:52:43,882 INFO [M:0;2467a7071e00:40991 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:52:43,882 DEBUG [M:0;2467a7071e00:40991 {}] master.HMaster(1795): Stopping service threads 2024-11-10T13:52:43,882 INFO [M:0;2467a7071e00:40991 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-10T13:52:43,882 INFO [M:0;2467a7071e00:40991 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T13:52:43,883 INFO [M:0;2467a7071e00:40991 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-10T13:52:43,883 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-10T13:52:43,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-10T13:52:43,892 DEBUG [M:0;2467a7071e00:40991 {}] zookeeper.ZKUtil(347): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-10T13:52:43,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:52:43,892 WARN [M:0;2467a7071e00:40991 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-10T13:52:43,893 INFO [M:0;2467a7071e00:40991 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/.lastflushedseqids 2024-11-10T13:52:43,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741843_1019 (size=127) 2024-11-10T13:52:43,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741843_1019 (size=127) 2024-11-10T13:52:43,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741843_1019 (size=127) 2024-11-10T13:52:43,901 INFO [M:0;2467a7071e00:40991 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-10T13:52:43,902 INFO [M:0;2467a7071e00:40991 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-10T13:52:43,902 DEBUG [M:0;2467a7071e00:40991 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T13:52:43,902 INFO [M:0;2467a7071e00:40991 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:52:43,902 DEBUG [M:0;2467a7071e00:40991 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-10T13:52:43,902 DEBUG [M:0;2467a7071e00:40991 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T13:52:43,902 DEBUG [M:0;2467a7071e00:40991 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:52:43,902 INFO [M:0;2467a7071e00:40991 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-11-10T13:52:43,920 DEBUG [M:0;2467a7071e00:40991 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5c5e2385191a47bfbece8058a94dde75 is 82, key is hbase:meta,,1/info:regioninfo/1731246761881/Put/seqid=0 2024-11-10T13:52:43,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741844_1020 (size=5672) 2024-11-10T13:52:43,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741844_1020 (size=5672) 2024-11-10T13:52:43,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741844_1020 (size=5672) 2024-11-10T13:52:43,929 INFO [M:0;2467a7071e00:40991 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5c5e2385191a47bfbece8058a94dde75 2024-11-10T13:52:43,948 INFO [RS:1;2467a7071e00:33817 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:52:43,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:52:43,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x10125051d920002, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:52:43,948 INFO [RS:1;2467a7071e00:33817 {}] regionserver.HRegionServer(1031): Exiting; stopping=2467a7071e00,33817,1731246760683; zookeeper connection closed. 2024-11-10T13:52:43,948 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7c62b598 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7c62b598 2024-11-10T13:52:43,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:52:43,958 INFO [RS:2;2467a7071e00:38731 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:52:43,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38731-0x10125051d920003, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:52:43,958 INFO [RS:2;2467a7071e00:38731 {}] regionserver.HRegionServer(1031): Exiting; stopping=2467a7071e00,38731,1731246760726; zookeeper connection closed. 
2024-11-10T13:52:43,958 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@392eeb44 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@392eeb44 2024-11-10T13:52:43,959 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-10T13:52:43,960 DEBUG [M:0;2467a7071e00:40991 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/160574f8ac2341bb8c0ba0df91e370b2 is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731246762799/Put/seqid=0 2024-11-10T13:52:43,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741845_1021 (size=6438) 2024-11-10T13:52:43,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741845_1021 (size=6438) 2024-11-10T13:52:43,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741845_1021 (size=6438) 2024-11-10T13:52:43,969 INFO [M:0;2467a7071e00:40991 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.13 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/160574f8ac2341bb8c0ba0df91e370b2 2024-11-10T13:52:43,999 DEBUG [M:0;2467a7071e00:40991 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d41a3004d25c434ea2115f4055096863 is 69, key is 2467a7071e00,33817,1731246760683/rs:state/1731246761193/Put/seqid=0 2024-11-10T13:52:44,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741846_1022 (size=5294) 2024-11-10T13:52:44,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741846_1022 (size=5294) 2024-11-10T13:52:44,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741846_1022 (size=5294) 2024-11-10T13:52:44,409 INFO [M:0;2467a7071e00:40991 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d41a3004d25c434ea2115f4055096863 2024-11-10T13:52:44,423 DEBUG [M:0;2467a7071e00:40991 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5c5e2385191a47bfbece8058a94dde75 as hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5c5e2385191a47bfbece8058a94dde75 2024-11-10T13:52:44,429 INFO [M:0;2467a7071e00:40991 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5c5e2385191a47bfbece8058a94dde75, entries=8, sequenceid=72, filesize=5.5 K 2024-11-10T13:52:44,430 DEBUG [M:0;2467a7071e00:40991 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/160574f8ac2341bb8c0ba0df91e370b2 as hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/160574f8ac2341bb8c0ba0df91e370b2 2024-11-10T13:52:44,436 INFO [M:0;2467a7071e00:40991 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/160574f8ac2341bb8c0ba0df91e370b2, entries=8, sequenceid=72, filesize=6.3 K 2024-11-10T13:52:44,437 DEBUG [M:0;2467a7071e00:40991 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d41a3004d25c434ea2115f4055096863 as hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d41a3004d25c434ea2115f4055096863 2024-11-10T13:52:44,443 INFO [M:0;2467a7071e00:40991 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39813/user/jenkins/test-data/5632fbd3-61b5-5942-e87b-444fc9da13c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d41a3004d25c434ea2115f4055096863, entries=3, sequenceid=72, filesize=5.2 K 2024-11-10T13:52:44,445 INFO [M:0;2467a7071e00:40991 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 542ms, sequenceid=72, compaction requested=false 2024-11-10T13:52:44,446 INFO [M:0;2467a7071e00:40991 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:52:44,446 DEBUG [M:0;2467a7071e00:40991 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731246763902Disabling compacts and flushes for region at 1731246763902Disabling writes for close at 1731246763902Obtaining lock to block concurrent updates at 1731246763902Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731246763902Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27459, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1731246763903 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731246763903Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731246763904 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731246763919 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731246763919Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731246763937 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731246763960 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731246763960Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731246763977 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731246763998 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731246763998Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@516e8563: reopening flushed file at 1731246764422 (+424 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b022c8a: reopening flushed file at 1731246764429 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@54370558: reopening flushed file at 1731246764436 (+7 ms)Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 542ms, sequenceid=72, compaction requested=false at 1731246764445 (+9 ms)Writing region close event to WAL at 1731246764446 (+1 ms)Closed at 1731246764446 2024-11-10T13:52:44,446 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:44,446 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:44,447 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:44,447 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:44,447 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:52:44,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35811 is added to blk_1073741830_1006 (size=32662) 2024-11-10T13:52:44,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41263 is added to blk_1073741830_1006 (size=32662) 2024-11-10T13:52:44,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36097 is added to blk_1073741830_1006 (size=32662) 2024-11-10T13:52:44,450 INFO [M:0;2467a7071e00:40991 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-10T13:52:44,450 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-10T13:52:44,450 INFO [M:0;2467a7071e00:40991 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40991 2024-11-10T13:52:44,450 INFO [M:0;2467a7071e00:40991 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:52:44,603 INFO [M:0;2467a7071e00:40991 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:52:44,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:52:44,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40991-0x10125051d920000, quorum=127.0.0.1:52921, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:52:44,607 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@47c8a2ee{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:52:44,608 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@323fafe0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:52:44,608 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:52:44,608 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@190023f0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:52:44,609 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10a2b6be{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/hadoop.log.dir/,STOPPED} 2024-11-10T13:52:44,611 WARN [BP-1551880796-172.17.0.3-1731246757876 heartbeating to localhost/127.0.0.1:39813 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:52:44,611 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T13:52:44,611 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:52:44,611 WARN [BP-1551880796-172.17.0.3-1731246757876 heartbeating to localhost/127.0.0.1:39813 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1551880796-172.17.0.3-1731246757876 (Datanode Uuid 4bb7a198-2c96-4eda-bb4f-2a2fd3c8e571) service to localhost/127.0.0.1:39813 2024-11-10T13:52:44,612 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/cluster_c9a2529b-8d95-60c1-07ef-f3ab1ab8f621/data/data5/current/BP-1551880796-172.17.0.3-1731246757876 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:52:44,613 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/cluster_c9a2529b-8d95-60c1-07ef-f3ab1ab8f621/data/data6/current/BP-1551880796-172.17.0.3-1731246757876 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:52:44,613 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:52:44,614 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4cfd3d1f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:52:44,615 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@d52b56f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:52:44,615 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:52:44,615 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6686fe53{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:52:44,615 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6ba59100{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/hadoop.log.dir/,STOPPED} 2024-11-10T13:52:44,617 WARN [BP-1551880796-172.17.0.3-1731246757876 heartbeating to localhost/127.0.0.1:39813 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:52:44,617 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T13:52:44,617 WARN [BP-1551880796-172.17.0.3-1731246757876 heartbeating to localhost/127.0.0.1:39813 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1551880796-172.17.0.3-1731246757876 (Datanode Uuid 0a978caf-0fe4-44fe-a89c-628be87127cb) service to localhost/127.0.0.1:39813 2024-11-10T13:52:44,617 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:52:44,617 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/cluster_c9a2529b-8d95-60c1-07ef-f3ab1ab8f621/data/data3/current/BP-1551880796-172.17.0.3-1731246757876 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:52:44,617 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/cluster_c9a2529b-8d95-60c1-07ef-f3ab1ab8f621/data/data4/current/BP-1551880796-172.17.0.3-1731246757876 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:52:44,618 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:52:44,619 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c5f5451{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:52:44,620 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a8b9e38{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:52:44,620 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:52:44,620 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11812ea4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:52:44,620 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@436188c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/hadoop.log.dir/,STOPPED} 2024-11-10T13:52:44,621 WARN [BP-1551880796-172.17.0.3-1731246757876 heartbeating to localhost/127.0.0.1:39813 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:52:44,621 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T13:52:44,621 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:52:44,621 WARN [BP-1551880796-172.17.0.3-1731246757876 heartbeating to localhost/127.0.0.1:39813 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1551880796-172.17.0.3-1731246757876 (Datanode Uuid 514e5816-9871-4e02-a4d7-635e19b3d895) service to localhost/127.0.0.1:39813 2024-11-10T13:52:44,622 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/cluster_c9a2529b-8d95-60c1-07ef-f3ab1ab8f621/data/data1/current/BP-1551880796-172.17.0.3-1731246757876 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:52:44,622 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/cluster_c9a2529b-8d95-60c1-07ef-f3ab1ab8f621/data/data2/current/BP-1551880796-172.17.0.3-1731246757876 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:52:44,623 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:52:44,628 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7d1fffc5{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T13:52:44,629 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7e69ce72{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:52:44,629 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:52:44,629 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43a7f4cb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:52:44,629 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77ad49ec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a2a8621a-a795-db63-2d1f-9ccd1f2fbf25/hadoop.log.dir/,STOPPED} 2024-11-10T13:52:44,635 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-10T13:52:44,662 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-10T13:52:44,668 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=152 (was 92) - Thread LEAK? -, OpenFileDescriptor=518 (was 445) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=105 (was 106), ProcessCount=11 (was 11), AvailableMemoryMB=6984 (was 7151)